diff --git a/config/kernel-blk-queue.m4 b/config/kernel-blk-queue.m4
index 1dced82ce686..ff5d2d370e98 100644
--- a/config/kernel-blk-queue.m4
+++ b/config/kernel-blk-queue.m4
@@ -1,302 +1,342 @@
 dnl #
 dnl # 2.6.39 API change,
 dnl # blk_start_plug() and blk_finish_plug()
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG], [
 	ZFS_LINUX_TEST_SRC([blk_plug], [
 		#include <linux/blkdev.h>
 	],[
 		struct blk_plug plug __attribute__ ((unused));
 
 		blk_start_plug(&plug);
 		blk_finish_plug(&plug);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_PLUG], [
 	AC_MSG_CHECKING([whether struct blk_plug is available])
 	ZFS_LINUX_TEST_RESULT([blk_plug], [
 		AC_MSG_RESULT(yes)
 	],[
 		ZFS_LINUX_TEST_ERROR([blk_plug])
 	])
 ])
 
 dnl #
 dnl # 2.6.32 - 4.11: statically allocated bdi in request_queue
 dnl # 4.12: dynamically allocated bdi in request_queue
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI], [
 	ZFS_LINUX_TEST_SRC([blk_queue_bdi], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue q;
 		struct backing_dev_info bdi;
 		q.backing_dev_info = &bdi;
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_BDI], [
 	AC_MSG_CHECKING([whether blk_queue bdi is dynamic])
 	ZFS_LINUX_TEST_RESULT([blk_queue_bdi], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_BDI_DYNAMIC, 1,
 		    [blk queue backing_dev_info is dynamic])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
+dnl #
+dnl # 5.9: added blk_queue_update_readahead(),
+dnl # 5.15: renamed to disk_update_readahead()
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD], [
+	ZFS_LINUX_TEST_SRC([blk_queue_update_readahead], [
+		#include <linux/blkdev.h>
+	],[
+		struct request_queue q;
+		blk_queue_update_readahead(&q);
+	])
+
+	ZFS_LINUX_TEST_SRC([disk_update_readahead], [
+		#include <linux/blkdev.h>
+	],[
+		struct gendisk disk;
+		disk_update_readahead(&disk);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD], [
+	AC_MSG_CHECKING([whether blk_queue_update_readahead() exists])
+	ZFS_LINUX_TEST_RESULT([blk_queue_update_readahead], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_BLK_QUEUE_UPDATE_READAHEAD, 1,
+		    [blk_queue_update_readahead() exists])
+	],[
+		AC_MSG_CHECKING([whether disk_update_readahead() exists])
+		ZFS_LINUX_TEST_RESULT([disk_update_readahead], [
+			AC_MSG_RESULT(yes)
+			AC_DEFINE(HAVE_DISK_UPDATE_READAHEAD, 1,
+			    [disk_update_readahead() exists])
+		],[
+			AC_MSG_RESULT(no)
+		])
+	])
+])
+
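Note: a successful probe here simply becomes a preprocessor define in the
generated zfs_config.h, which the compat header later in this patch keys off.
A minimal sketch of that consumer pattern, assuming only the AC_DEFINE tokens
above (the shim name zfs_update_readahead is hypothetical):

	/* zfs_config.h after configure, depending on the kernel:	*/
	/* #define HAVE_BLK_QUEUE_UPDATE_READAHEAD 1	(5.9 - 5.14)	*/
	/* #define HAVE_DISK_UPDATE_READAHEAD 1		(5.15+)		*/

	#include <linux/blkdev.h>

	/* Hypothetical shim dispatching to whichever interface was found. */
	static inline void
	zfs_update_readahead(struct gendisk *disk)
	{
	#if defined(HAVE_DISK_UPDATE_READAHEAD)
		disk_update_readahead(disk);			/* 5.15+ */
	#elif defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD)
		blk_queue_update_readahead(disk->queue);	/* 5.9 - 5.14 */
	#endif	/* older kernels manage ra_pages directly */
	}
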
 dnl #
 dnl # 2.6.32 API,
 dnl # blk_queue_discard()
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD], [
 	ZFS_LINUX_TEST_SRC([blk_queue_discard], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		int value __attribute__ ((unused));
 		value = blk_queue_discard(q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_DISCARD], [
 	AC_MSG_CHECKING([whether blk_queue_discard() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_discard], [
 		AC_MSG_RESULT(yes)
 	],[
 		ZFS_LINUX_TEST_ERROR([blk_queue_discard])
 	])
 ])
 
 dnl #
 dnl # 4.8 API,
 dnl # blk_queue_secure_erase()
 dnl #
 dnl # 2.6.36 - 4.7 API,
 dnl # blk_queue_secdiscard()
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE], [
 	ZFS_LINUX_TEST_SRC([blk_queue_secure_erase], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		int value __attribute__ ((unused));
 		value = blk_queue_secure_erase(q);
 	])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_secdiscard], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		int value __attribute__ ((unused));
 		value = blk_queue_secdiscard(q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE], [
 	AC_MSG_CHECKING([whether blk_queue_secure_erase() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_secure_erase], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_SECURE_ERASE, 1,
 		    [blk_queue_secure_erase() is available])
 	],[
 		AC_MSG_RESULT(no)
 
 		AC_MSG_CHECKING([whether blk_queue_secdiscard() is available])
 		ZFS_LINUX_TEST_RESULT([blk_queue_secdiscard], [
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_SECDISCARD, 1,
 			    [blk_queue_secdiscard() is available])
 		],[
 			ZFS_LINUX_TEST_ERROR([blk_queue_secure_erase])
 		])
 	])
 ])
 
 dnl #
 dnl # 4.16 API change,
 dnl # Introduction of blk_queue_flag_set and blk_queue_flag_clear
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flag_set], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q = NULL;
 		blk_queue_flag_set(0, q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET], [
 	AC_MSG_CHECKING([whether blk_queue_flag_set() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flag_set], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLAG_SET, 1,
 		    [blk_queue_flag_set() exists])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flag_clear], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q = NULL;
 		blk_queue_flag_clear(0, q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR], [
 	AC_MSG_CHECKING([whether blk_queue_flag_clear() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flag_clear], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLAG_CLEAR, 1,
 		    [blk_queue_flag_clear() exists])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 dnl #
 dnl # 2.6.36 API change,
 dnl # Added blk_queue_flush() interface; while the previous interface
 dnl # was available to all, the new one is GPL-only.  Thus in addition to
 dnl # detecting if this function is available we determine if it is
 dnl # GPL-only.  If the GPL-only interface is there we implement our own
 dnl # compatibility function, otherwise we use the function.  The hope
 dnl # is that long term this function will be opened up.
 dnl #
 dnl # 4.7 API change,
 dnl # Replace blk_queue_flush with blk_queue_write_cache
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flush], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q = NULL;
 		(void) blk_queue_flush(q, REQ_FLUSH);
 	], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_write_cache], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q = NULL;
 		blk_queue_write_cache(q, true, true);
 	], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLUSH], [
 	AC_MSG_CHECKING([whether blk_queue_flush() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flush], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLUSH, 1,
 		    [blk_queue_flush() is available])
 
 		AC_MSG_CHECKING([whether blk_queue_flush() is GPL-only])
 		ZFS_LINUX_TEST_RESULT([blk_queue_flush_license], [
 			AC_MSG_RESULT(no)
 		],[
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY, 1,
 			    [blk_queue_flush() is GPL-only])
 		])
 	],[
 		AC_MSG_RESULT(no)
 	])
 
 	dnl #
 	dnl # 4.7 API change
 	dnl # Replace blk_queue_flush with blk_queue_write_cache
 	dnl #
 	AC_MSG_CHECKING([whether blk_queue_write_cache() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_write_cache], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE, 1,
 		    [blk_queue_write_cache() exists])
 
 		AC_MSG_CHECKING([whether blk_queue_write_cache() is GPL-only])
 		ZFS_LINUX_TEST_RESULT([blk_queue_write_cache_license], [
 			AC_MSG_RESULT(no)
 		],[
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY, 1,
 			    [blk_queue_write_cache() is GPL-only])
 		])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
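Note: the blk_queue_flush_license and blk_queue_write_cache_license results
checked above come from the trailing [ZFS_META_LICENSE] argument to
ZFS_LINUX_TEST_SRC: the harness builds a second copy of the test module
carrying the project's license string, and linking fails for symbols exported
with EXPORT_SYMBOL_GPL().  A rough sketch of that license-variant probe
(illustrative only, not the literal generated conftest):

	#include <linux/module.h>
	#include <linux/blkdev.h>

	static int __init
	conftest_init(void)
	{
		struct request_queue *q = NULL;

		/*
		 * Resolving this symbol fails at module-link time when the
		 * export is EXPORT_SYMBOL_GPL() and the license below is not
		 * GPL-compatible - exactly the GPL-only case being probed.
		 */
		blk_queue_write_cache(q, true, true);
		return (0);
	}
	module_init(conftest_init);

	MODULE_LICENSE("CDDL");	/* substituted from ZFS_META_LICENSE */
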
 dnl #
 dnl # 2.6.34 API change
 dnl # blk_queue_max_hw_sectors() replaces blk_queue_max_sectors().
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS], [
 	ZFS_LINUX_TEST_SRC([blk_queue_max_hw_sectors], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q = NULL;
 		(void) blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 	], [$NO_UNUSED_BUT_SET_VARIABLE])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS], [
 	AC_MSG_CHECKING([whether blk_queue_max_hw_sectors() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_max_hw_sectors], [
 		AC_MSG_RESULT(yes)
 	],[
 		ZFS_LINUX_TEST_ERROR([blk_queue_max_hw_sectors])
 	])
 ])
 
 dnl #
 dnl # 2.6.34 API change
 dnl # blk_queue_max_segments() consolidates blk_queue_max_hw_segments()
 dnl # and blk_queue_max_phys_segments().
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS], [
 	ZFS_LINUX_TEST_SRC([blk_queue_max_segments], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q = NULL;
 		(void) blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 	], [$NO_UNUSED_BUT_SET_VARIABLE])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
 	AC_MSG_CHECKING([whether blk_queue_max_segments() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_max_segments], [
 		AC_MSG_RESULT(yes)
 	], [
 		ZFS_LINUX_TEST_ERROR([blk_queue_max_segments])
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI
+	ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
 	ZFS_AC_KERNEL_BLK_QUEUE_PLUG
 	ZFS_AC_KERNEL_BLK_QUEUE_BDI
+	ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD
 	ZFS_AC_KERNEL_BLK_QUEUE_DISCARD
 	ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET
 	ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR
 	ZFS_AC_KERNEL_BLK_QUEUE_FLUSH
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
 ])
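The header hunk below consumes the two new defines: the guard it adds skips
the manual ra_pages assignment whenever either function exists.  The rationale
(hedged; this is the commit's intent as I read it) is that on 5.9+ kernels the
block layer derives readahead from the queue limits itself, so a hand-written
value is redundant and liable to be overwritten.  Caller-side nothing changes;
a sketch, where zvo_queue is a hypothetical request-queue pointer:

	/* Pre-5.9 kernels: sets bdi ra_pages; 5.9+: compiles to a no-op. */
	blk_queue_set_read_ahead(zvo_queue, 2);
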
diff --git a/include/os/linux/kernel/linux/blkdev_compat.h b/include/os/linux/kernel/linux/blkdev_compat.h
index 87d541072015..019d5390adec 100644
--- a/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/include/os/linux/kernel/linux/blkdev_compat.h
@@ -1,579 +1,582 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 
 /*
  * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
  * LLNL-CODE-403049.
  */
 
 #ifndef _ZFS_BLKDEV_H
 #define	_ZFS_BLKDEV_H
 
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/hdreg.h>
 #include <linux/major.h>
 #include <linux/msdos_fs.h>	/* for SECTOR_* */
 
 #ifndef HAVE_BLK_QUEUE_FLAG_SET
 static inline void
 blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
 	queue_flag_set(flag, q);
 }
 #endif
 
 #ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
 static inline void
 blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	queue_flag_clear(flag, q);
 }
 #endif
 
 /*
  * 4.7 API,
  * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
  * interface.  However, the new interface is GPL-only, thus we implement
  * our own trivial wrapper when the GPL-only version is detected.
  *
  * 2.6.36 - 4.6 API,
  * The blk_queue_flush() interface has replaced the blk_queue_ordered()
  * interface.  However, while the old interface was available to all,
  * the new one is GPL-only.  Thus if the GPL-only version is detected we
  * implement our own trivial helper.
  */
 static inline void
 blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
 	if (wc)
 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 	if (fua)
 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
 	blk_queue_write_cache(q, wc, fua);
 #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
 	if (wc)
 		q->flush_flags |= REQ_FLUSH;
 	if (fua)
 		q->flush_flags |= REQ_FUA;
 #elif defined(HAVE_BLK_QUEUE_FLUSH)
 	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 static inline void
 blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
 {
+#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
+	!defined(HAVE_DISK_UPDATE_READAHEAD)
 #ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
 	q->backing_dev_info->ra_pages = ra_pages;
 #else
 	q->backing_dev_info.ra_pages = ra_pages;
 #endif
+#endif
 }
 
 #ifdef HAVE_BIO_BVEC_ITER
 #define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
 #define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
 #define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
 #define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
 #define	bio_for_each_segment4(bv, bvp, b, i)	\
 	bio_for_each_segment((bv), (b), (i))
 typedef struct bvec_iter bvec_iterator_t;
 #else
 #define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
 #define	BIO_BI_SIZE(bio)	(bio)->bi_size
 #define	BIO_BI_IDX(bio)		(bio)->bi_idx
 #define	BIO_BI_SKIP(bio)	(0)
 #define	bio_for_each_segment4(bv, bvp, b, i)	\
 	bio_for_each_segment((bvp), (b), (i))
 typedef int bvec_iterator_t;
 #endif
 
 static inline void
 bio_set_flags_failfast(struct block_device *bdev, int *flags)
 {
 #ifdef CONFIG_BUG
 	/*
 	 * Disable FAILFAST for loopback devices because of the
 	 * following incorrect BUG_ON() in loop_make_request().
 	 * This support is also disabled for md devices because the
 	 * test suite layers md devices on top of loopback devices.
 	 * This may be removed when the loopback driver is fixed.
 	 *
 	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
 	 */
 	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
 	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
 		return;
 
 #ifdef BLOCK_EXT_MAJOR
 	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
 		return;
 #endif /* BLOCK_EXT_MAJOR */
 #endif /* CONFIG_BUG */
 
 	*flags |= REQ_FAILFAST_MASK;
 }
 
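A sketch of how a submit path would use the helper above; the bi_opf
assignment assumes a 4.8+ kernel where the bio flags live in bio->bi_opf:

	int flags = 0;

	/* No-op for loop/md/extended-major devices, otherwise sets
	 * REQ_FAILFAST_MASK so transport errors fail fast to ZFS. */
	bio_set_flags_failfast(bdev, &flags);
	bio->bi_opf |= flags;
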
 /*
  * Maximum disk label length, it may be undefined for some kernels.
  */
 #if !defined(DISK_NAME_LEN)
 #define	DISK_NAME_LEN	32
 #endif /* DISK_NAME_LEN */
 
 #ifdef HAVE_BIO_BI_STATUS
 static inline int
 bi_status_to_errno(blk_status_t status)
 {
 	switch (status) {
 	case BLK_STS_OK:
 		return (0);
 	case BLK_STS_NOTSUPP:
 		return (EOPNOTSUPP);
 	case BLK_STS_TIMEOUT:
 		return (ETIMEDOUT);
 	case BLK_STS_NOSPC:
 		return (ENOSPC);
 	case BLK_STS_TRANSPORT:
 		return (ENOLINK);
 	case BLK_STS_TARGET:
 		return (EREMOTEIO);
 	case BLK_STS_NEXUS:
 		return (EBADE);
 	case BLK_STS_MEDIUM:
 		return (ENODATA);
 	case BLK_STS_PROTECTION:
 		return (EILSEQ);
 	case BLK_STS_RESOURCE:
 		return (ENOMEM);
 	case BLK_STS_AGAIN:
 		return (EAGAIN);
 	case BLK_STS_IOERR:
 		return (EIO);
 	default:
 		return (EIO);
 	}
 }
 
 static inline blk_status_t
 errno_to_bi_status(int error)
 {
 	switch (error) {
 	case 0:
 		return (BLK_STS_OK);
 	case EOPNOTSUPP:
 		return (BLK_STS_NOTSUPP);
 	case ETIMEDOUT:
 		return (BLK_STS_TIMEOUT);
 	case ENOSPC:
 		return (BLK_STS_NOSPC);
 	case ENOLINK:
 		return (BLK_STS_TRANSPORT);
 	case EREMOTEIO:
 		return (BLK_STS_TARGET);
 	case EBADE:
 		return (BLK_STS_NEXUS);
 	case ENODATA:
 		return (BLK_STS_MEDIUM);
 	case EILSEQ:
 		return (BLK_STS_PROTECTION);
 	case ENOMEM:
 		return (BLK_STS_RESOURCE);
 	case EAGAIN:
 		return (BLK_STS_AGAIN);
 	case EIO:
 		return (BLK_STS_IOERR);
 	default:
 		return (BLK_STS_IOERR);
 	}
 }
 #endif /* HAVE_BIO_BI_STATUS */
 
 /*
  * 4.3 API change
  * The bio_endio() prototype changed slightly.  These are helper
  * macros to ensure the prototype and invocation are handled.
  */
 #ifdef HAVE_1ARG_BIO_END_IO_T
 #ifdef HAVE_BIO_BI_STATUS
 #define	BIO_END_IO_ERROR(bio)	bi_status_to_errno(bio->bi_status)
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
 #define	BIO_END_IO(bio, error)	bio_set_bi_status(bio, error)
 static inline void
 bio_set_bi_status(struct bio *bio, int error)
 {
 	ASSERT3S(error, <=, 0);
 	bio->bi_status = errno_to_bi_status(-error);
 	bio_endio(bio);
 }
 #else
 #define	BIO_END_IO_ERROR(bio)	(-(bio->bi_error))
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
 #define	BIO_END_IO(bio, error)	bio_set_bi_error(bio, error)
 static inline void
 bio_set_bi_error(struct bio *bio, int error)
 {
 	ASSERT3S(error, <=, 0);
 	bio->bi_error = error;
 	bio_endio(bio);
 }
 #endif /* HAVE_BIO_BI_STATUS */
 #else
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
 #define	BIO_END_IO(bio, error)	bio_endio(bio, error);
 #endif /* HAVE_1ARG_BIO_END_IO_T */
 
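Taken together, these macros let a single completion routine build against
every bio_endio() generation.  A minimal sketch (the function name and the
complete_zio() hand-off are hypothetical; err is a positive errno in all
branches):

	BIO_END_IO_PROTO(example_bio_done, bio, error)
	{
	#ifdef HAVE_1ARG_BIO_END_IO_T
		/* 4.3+: the error is carried in the bio itself. */
		int err = BIO_END_IO_ERROR(bio);
	#else
		/* Older kernels: the error arrives as the second argument. */
		int err = -error;
	#endif
		complete_zio(bio->bi_private, err);
	}
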
 /*
  * 4.1 API,
  * 3.10.0 CentOS 7.x API,
  *   blkdev_reread_part()
  *
  * For older kernels trigger a re-reading of the partition table by calling
  * check_disk_change() which calls flush_disk() to invalidate the device.
  *
  * For newer kernels (as of 5.10), bdev_check_media_change() is used, in
  * favor of check_disk_change(), with the modification that invalidation
  * is no longer forced.
  */
 #ifdef HAVE_CHECK_DISK_CHANGE
 #define	zfs_check_media_change(bdev)	check_disk_change(bdev)
 #ifdef HAVE_BLKDEV_REREAD_PART
 #define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
 #else
 #define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
 #endif /* HAVE_BLKDEV_REREAD_PART */
 #else
 #ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
 static inline int
 zfs_check_media_change(struct block_device *bdev)
 {
 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
 	struct gendisk *gd = bdev->bd_disk;
 	const struct block_device_operations *bdo = gd->fops;
 #endif
 
 	if (!bdev_check_media_change(bdev))
 		return (0);
 
 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
 	/*
 	 * Force revalidation, to mimic the old behavior of
 	 * check_disk_change()
 	 */
 	if (bdo->revalidate_disk)
 		bdo->revalidate_disk(gd);
 #endif
 
 	return (0);
 }
 #define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
 #else
 /*
  * This is encountered if check_disk_change() and bdev_check_media_change()
  * are not available in the kernel - likely due to an API change that needs
  * to be chased down.
  */
 #error "Unsupported kernel: no usable disk change check"
 #endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
 #endif /* HAVE_CHECK_DISK_CHANGE */
 
 /*
  * 2.6.27 API change
  * The function was exported for use, prior to this it existed but the
  * symbol was not exported.
  *
  * 4.4.0-6.21 API change for Ubuntu
  * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
  *
  * 5.11 API change
  * Changed to take a dev_t argument which is set on success and return a
  * non-zero error code on failure.
  */
 static inline int
 vdev_lookup_bdev(const char *path, dev_t *dev)
 {
 #if defined(HAVE_DEVT_LOOKUP_BDEV)
 	return (lookup_bdev(path, dev));
 #elif defined(HAVE_1ARG_LOOKUP_BDEV)
 	struct block_device *bdev = lookup_bdev(path);
 	if (IS_ERR(bdev))
 		return (PTR_ERR(bdev));
 
 	*dev = bdev->bd_dev;
 	bdput(bdev);
 
 	return (0);
 #elif defined(HAVE_MODE_LOOKUP_BDEV)
 	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
 	if (IS_ERR(bdev))
 		return (PTR_ERR(bdev));
 
 	*dev = bdev->bd_dev;
 	bdput(bdev);
 
 	return (0);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
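Usage is uniform regardless of which branch configure selected; a short
sketch (the device path is illustrative):

	dev_t dev;
	int error = vdev_lookup_bdev("/dev/disk/by-id/example", &dev);

	if (error == 0)
		printk(KERN_INFO "resolved %u:%u\n", MAJOR(dev), MINOR(dev));
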
 /*
  * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
  */
 #if !defined(HAVE_BIO_SET_OP_ATTRS)
 static inline void
 bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
 {
 	bio->bi_rw |= rw | flags;
 }
 #endif
 
 /*
  * bio_set_flush - Set the appropriate flags in a bio to guarantee
  * data are on non-volatile media on completion.
  *
  * 2.6.37 - 4.8 API,
  *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
  *   replacement for WRITE_BARRIER to allow expressing richer semantics
  *   to the block layer.  It's up to the block layer to implement the
  *   semantics correctly.  Use the WRITE_FLUSH_FUA flag combination.
  *
  * 4.8 - 4.9 API,
  *   REQ_FLUSH was renamed to REQ_PREFLUSH.  For consistency with previous
  *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
  *
  * 4.10 API,
  *   The read/write flags and their modifiers, including WRITE_FLUSH,
  *   WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
  *   torvalds/linux@70fd7614 and replaced by direct flag modification
  *   of the REQ_ flags in bio->bi_opf.  Use REQ_PREFLUSH.
  */
 static inline void
 bio_set_flush(struct bio *bio)
 {
 #if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
 	bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
 #elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
 	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
 #else
 #error "Allowing the build will cause bio_set_flush requests to be ignored."
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_FLUSH
  *
  * 4.8-rc0 - 4.8-rc1,
  *   REQ_PREFLUSH
  *
  * 2.6.36 - 4.7 API,
  *   REQ_FLUSH
  *
  * The flush request is detected from the bio itself in all cases.  This
  * may have a performance impact for some kernels, but it has the
  * advantage of minimizing kernel-specific changes in the zvol code.
  */
 static inline boolean_t
 bio_is_flush(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
 	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
 #elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_opf & REQ_PREFLUSH);
 #elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_rw & REQ_PREFLUSH);
 #elif defined(HAVE_REQ_FLUSH)
 	return (bio->bi_rw & REQ_FLUSH);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_FUA flag moved to bio->bi_opf
  *
  * 2.6.x - 4.7 API,
  *   REQ_FUA
  */
 static inline boolean_t
 bio_is_fua(struct bio *bio)
 {
 #if defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_opf & REQ_FUA);
 #elif defined(REQ_FUA)
 	return (bio->bi_rw & REQ_FUA);
 #else
 #error "Allowing the build will cause fua requests to be ignored."
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_DISCARD
  *
  * 2.6.36 - 4.7 API,
  *   REQ_DISCARD
  *
  * In all cases the normal I/O path is used for discards.  The only
  * difference is how the kernel tags individual I/Os as discards.
  */
 static inline boolean_t
 bio_is_discard(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_DISCARD)
 	return (bio_op(bio) == REQ_OP_DISCARD);
 #elif defined(HAVE_REQ_DISCARD)
 	return (bio->bi_rw & REQ_DISCARD);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_SECURE_ERASE
  *
  * 2.6.36 - 4.7 API,
  *   REQ_SECURE
  */
 static inline boolean_t
 bio_is_secure_erase(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_SECURE_ERASE)
 	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
 #elif defined(REQ_SECURE)
 	return (bio->bi_rw & REQ_SECURE);
 #else
 	return (0);
 #endif
 }
 
 /*
  * 2.6.33 API change
  * Discard granularity and alignment restrictions may now be set.  For
  * older kernels which do not support this it is safe to skip it.
  */
 static inline void
 blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
 {
 	q->limits.discard_granularity = dg;
 }
 
 /*
  * 4.8 API,
  *   blk_queue_secure_erase()
  *
  * 2.6.36 - 4.7 API,
  *   blk_queue_secdiscard()
  */
 static inline int
 blk_queue_discard_secure(struct request_queue *q)
 {
 #if defined(HAVE_BLK_QUEUE_SECURE_ERASE)
 	return (blk_queue_secure_erase(q));
 #elif defined(HAVE_BLK_QUEUE_SECDISCARD)
 	return (blk_queue_secdiscard(q));
 #else
 	return (0);
 #endif
 }
 
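These predicates are how a request path classifies incoming bios identically
on every kernel generation; a zvol-style dispatch sketch (the handler names
and the zv argument are hypothetical):

	if (bio_is_flush(bio) && BIO_BI_SIZE(bio) == 0)
		handle_flush(zv);		/* empty preflush */
	else if (bio_is_discard(bio) || bio_is_secure_erase(bio))
		handle_discard(zv, BIO_BI_SECTOR(bio), BIO_BI_SIZE(bio));
	else
		handle_rw(zv, bio);		/* regular read/write */
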
 /*
  * A common holder for vdev_bdev_open() is used to relax the exclusive open
  * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
  * allow them to open the device multiple times.  Other kernel callers and
  * user space processes which don't pass this value will get EBUSY.  This is
  * currently required for the correct operation of hot spares.
  */
 #define	VDEV_HOLDER			((void *)0x2401de7)
 
 static inline unsigned long
 blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
     struct gendisk *disk __attribute__((unused)),
     int rw __attribute__((unused)), struct bio *bio)
 {
 #if defined(HAVE_DISK_IO_ACCT)
 	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
 #elif defined(HAVE_BIO_IO_ACCT)
 	return (bio_start_io_acct(bio));
 #elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
 	unsigned long start_time = jiffies;
 	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
 	return (start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
 	unsigned long start_time = jiffies;
 	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
 	return (start_time);
 #else
 	/* Unsupported */
 	return (0);
 #endif
 }
 
 static inline void
 blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
     struct gendisk *disk __attribute__((unused)),
     int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
 {
 #if defined(HAVE_DISK_IO_ACCT)
 	disk_end_io_acct(disk, bio_op(bio), start_time);
 #elif defined(HAVE_BIO_IO_ACCT)
 	bio_end_io_acct(bio, start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
 	generic_end_io_acct(rw, &disk->part0, start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
 	generic_end_io_acct(q, rw, &disk->part0, start_time);
 #endif
 }
 
 #ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
 static inline struct request_queue *
 blk_generic_alloc_queue(make_request_fn make_request, int node_id)
 {
 #if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
 	return (blk_alloc_queue(make_request, node_id));
 #elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
 	return (blk_alloc_queue_rh(make_request, node_id));
 #else
 	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
 	if (q != NULL)
 		blk_queue_make_request(q, make_request);
 
 	return (q);
 #endif
 }
 #endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
 
 #endif /* _ZFS_BLKDEV_H */
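For completeness, the two accounting helpers pair around servicing a bio: the
value returned by the start helper is opaque (jiffies on older kernels) and
must be passed back on completion, and the rw argument is ignored on kernels
with the modern accounting interfaces.  A sketch:

	unsigned long start_time;

	start_time = blk_generic_start_io_acct(q, disk, rw, bio);
	/* ... service the I/O ... */
	blk_generic_end_io_acct(q, disk, rw, bio, start_time);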