diff --git a/sys/compat/linuxkpi/common/include/asm/unaligned.h b/sys/compat/linuxkpi/common/include/asm/unaligned.h
new file mode 100644
index 000000000000..7597f00f7596
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/unaligned.h
@@ -0,0 +1,78 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_ASM_UNALIGNED_H
+#define	_ASM_UNALIGNED_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+static __inline uint32_t
+get_unaligned_le32(const void *p)
+{
+
+	return (le32_to_cpup((const __le32 *)p));
+}
+
+static __inline void
+put_unaligned_le32(__le32 v, void *p)
+{
+	__le32 x;
+
+	x = cpu_to_le32(v);
+	memcpy(p, &x, sizeof(x));
+}
+
+static __inline void
+put_unaligned_le64(__le64 v, void *p)
+{
+	__le64 x;
+
+	x = cpu_to_le64(v);
+	memcpy(p, &x, sizeof(x));
+}
+
+static __inline uint16_t
+get_unaligned_be16(const void *p)
+{
+
+	return (be16_to_cpup((const __be16 *)p));
+}
+
+static __inline uint32_t
+get_unaligned_be32(const void *p)
+{
+
+	return (be32_to_cpup((const __be32 *)p));
+}
+
+#endif	/* _ASM_UNALIGNED_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/kernel.h b/sys/compat/linuxkpi/common/include/linux/kernel.h
index 9b0a2df72322..daef6216a151 100644
--- a/sys/compat/linuxkpi/common/include/linux/kernel.h
+++ b/sys/compat/linuxkpi/common/include/linux/kernel.h
@@ -1,596 +1,638 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
  * Copyright (c) 2014-2015 François Tigeot
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 #ifndef	_LINUX_KERNEL_H_
 #define	_LINUX_KERNEL_H_
 
 #include <sys/cdefs.h>
 #include <sys/types.h>
 #include <sys/systm.h>
 #include <sys/param.h>
 #include <sys/libkern.h>
 #include <sys/stat.h>
 #include <sys/smp.h>
 #include <sys/stddef.h>
 #include <sys/syslog.h>
 #include <sys/time.h>
 
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/jiffies.h>
 #include <linux/log2.h>
 
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
 #include <machine/stdarg.h>
 
 #define KERN_CONT       ""
 #define	KERN_EMERG	"<0>"
 #define	KERN_ALERT	"<1>"
 #define	KERN_CRIT	"<2>"
 #define	KERN_ERR	"<3>"
 #define	KERN_WARNING	"<4>"
 #define	KERN_NOTICE	"<5>"
 #define	KERN_INFO	"<6>"
 #define	KERN_DEBUG	"<7>"
 
 #define	U8_MAX		((u8)~0U)
 #define	S8_MAX		((s8)(U8_MAX >> 1))
 #define	S8_MIN		((s8)(-S8_MAX - 1))
 #define	U16_MAX		((u16)~0U)
 #define	S16_MAX		((s16)(U16_MAX >> 1))
 #define	S16_MIN		((s16)(-S16_MAX - 1))
 #define	U32_MAX		((u32)~0U)
 #define	S32_MAX		((s32)(U32_MAX >> 1))
 #define	S32_MIN		((s32)(-S32_MAX - 1))
 #define	U64_MAX		((u64)~0ULL)
 #define	S64_MAX		((s64)(U64_MAX >> 1))
 #define	S64_MIN		((s64)(-S64_MAX - 1))
 
 #define	S8_C(x)  x
 #define	U8_C(x)  x ## U
 #define	S16_C(x) x
 #define	U16_C(x) x ## U
 #define	S32_C(x) x
 #define	U32_C(x) x ## U
 #define	S64_C(x) x ## LL
 #define	U64_C(x) x ## ULL
 
 #define	BUILD_BUG()			do { CTASSERT(0); } while (0)
 #define	BUILD_BUG_ON(x)			CTASSERT(!(x))
 #define	BUILD_BUG_ON_MSG(x, msg)	BUILD_BUG_ON(x)
 #define	BUILD_BUG_ON_NOT_POWER_OF_2(x)	BUILD_BUG_ON(!powerof2(x))
 #define	BUILD_BUG_ON_INVALID(expr)	while (0) { (void)(expr); }
 
 extern const volatile int lkpi_build_bug_on_zero;
 #define	BUILD_BUG_ON_ZERO(x)	((x) ? lkpi_build_bug_on_zero : 0)
 
 #define	BUG()			panic("BUG at %s:%d", __FILE__, __LINE__)
 #define	BUG_ON(cond)		do {				\
 	if (cond) {						\
 		panic("BUG ON %s failed at %s:%d",		\
 		    __stringify(cond), __FILE__, __LINE__);	\
 	}							\
 } while (0)
 
 #define	WARN_ON(cond) ({					\
       bool __ret = (cond);					\
       if (__ret) {						\
 		printf("WARNING %s failed at %s:%d\n",		\
 		    __stringify(cond), __FILE__, __LINE__);	\
 		linux_dump_stack();				\
       }								\
       unlikely(__ret);						\
 })
 
 #define	WARN_ON_SMP(cond)	WARN_ON(cond)
 
 #define	WARN_ON_ONCE(cond) ({					\
       static bool __warn_on_once;				\
       bool __ret = (cond);					\
       if (__ret && !__warn_on_once) {				\
 		__warn_on_once = 1;				\
 		printf("WARNING %s failed at %s:%d\n",		\
 		    __stringify(cond), __FILE__, __LINE__);	\
 		linux_dump_stack();				\
       }								\
       unlikely(__ret);						\
 })
 
 #define	oops_in_progress	SCHEDULER_STOPPED()
 
 #undef	ALIGN
 #define	ALIGN(x, y)		roundup2((x), (y))
 #undef PTR_ALIGN
 #define	PTR_ALIGN(p, a)		((__typeof(p))ALIGN((uintptr_t)(p), (a)))
 #define	IS_ALIGNED(x, a)	(((x) & ((__typeof(x))(a) - 1)) == 0)
 #define	DIV_ROUND_UP(x, n)	howmany(x, n)
 #define	__KERNEL_DIV_ROUND_UP(x, n)	howmany(x, n)
 #define	DIV_ROUND_UP_ULL(x, n)	DIV_ROUND_UP((unsigned long long)(x), (n))
 #define	DIV_ROUND_DOWN_ULL(x, n) (((unsigned long long)(x) / (n)) * (n))
 #define	FIELD_SIZEOF(t, f)	sizeof(((t *)0)->f)
 
 #define	printk(...)		printf(__VA_ARGS__)
 #define	vprintk(f, a)		vprintf(f, a)
 
 #define	asm			__asm
 
 extern void linux_dump_stack(void);
 #define	dump_stack()		linux_dump_stack()
 
 struct va_format {
 	const char *fmt;
 	va_list *va;
 };
 
 static inline int
 vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
 	ssize_t ssize = size;
 	int i;
 
 	i = vsnprintf(buf, size, fmt, args);
 
 	return ((i >= ssize) ? (ssize - 1) : i);
 }
 
 static inline int
 scnprintf(char *buf, size_t size, const char *fmt, ...)
 {
 	va_list args;
 	int i;
 
 	va_start(args, fmt);
 	i = vscnprintf(buf, size, fmt, args);
 	va_end(args);
 
 	return (i);
 }
 
 /*
  * The "pr_debug()" and "pr_devel()" macros should produce zero code
  * unless DEBUG is defined:
  */
 #ifdef DEBUG
 extern int linuxkpi_debug;
 #define pr_debug(fmt, ...)					\
 	do {							\
 		if (linuxkpi_debug)				\
 			log(LOG_DEBUG, fmt, ##__VA_ARGS__);	\
 	} while (0)
 #define pr_devel(fmt, ...) \
 	log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_debug(fmt, ...) \
 	({ if (0) log(LOG_DEBUG, fmt, ##__VA_ARGS__); 0; })
 #define pr_devel(fmt, ...) \
 	({ if (0) log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__); 0; })
 #endif
 
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
 
 /*
  * Print a one-time message (analogous to WARN_ONCE() et al):
  */
 #define printk_once(...) do {			\
 	static bool __print_once;		\
 						\
 	if (!__print_once) {			\
 		__print_once = true;		\
 		printk(__VA_ARGS__);		\
 	}					\
 } while (0)
 
 /*
  * Log a one-time message (analogous to WARN_ONCE() et al):
  */
 #define log_once(level,...) do {		\
 	static bool __log_once;			\
 						\
 	if (unlikely(!__log_once)) {		\
 		__log_once = true;		\
 		log(level, __VA_ARGS__);	\
 	}					\
 } while (0)
 
 #define pr_emerg(fmt, ...) \
 	log(LOG_EMERG, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_alert(fmt, ...) \
 	log(LOG_ALERT, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_crit(fmt, ...) \
 	log(LOG_CRIT, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
 	log(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
 	log(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warn(...) \
 	pr_warning(__VA_ARGS__)
 #define pr_warn_once(fmt, ...) \
 	log_once(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_notice(fmt, ...) \
 	log(LOG_NOTICE, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
 	log(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info_once(fmt, ...) \
 	log_once(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_cont(fmt, ...) \
 	printk(KERN_CONT fmt, ##__VA_ARGS__)
 #define	pr_warn_ratelimited(...) do {		\
 	static linux_ratelimit_t __ratelimited;	\
 	if (linux_ratelimited(&__ratelimited))	\
 		pr_warning(__VA_ARGS__);	\
 } while (0)
 
 #ifndef WARN
 #define	WARN(condition, ...) ({			\
 	bool __ret_warn_on = (condition);	\
 	if (unlikely(__ret_warn_on))		\
 		pr_warning(__VA_ARGS__);	\
 	unlikely(__ret_warn_on);		\
 })
 #endif
 
 #ifndef WARN_ONCE
 #define	WARN_ONCE(condition, ...) ({		\
 	bool __ret_warn_on = (condition);	\
 	if (unlikely(__ret_warn_on))		\
 		pr_warn_once(__VA_ARGS__);	\
 	unlikely(__ret_warn_on);		\
 })
 #endif
 
 #define container_of(ptr, type, member)				\
 ({								\
 	const __typeof(((type *)0)->member) *__p = (ptr);	\
 	(type *)((uintptr_t)__p - offsetof(type, member));	\
 })
 
 #define	ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
 
 #define	u64_to_user_ptr(val)	((void *)(uintptr_t)(val))
 
 static inline unsigned long long
 simple_strtoull(const char *cp, char **endp, unsigned int base)
 {
 	return (strtouq(cp, endp, base));
 }
 
 static inline long long
 simple_strtoll(const char *cp, char **endp, unsigned int base)
 {
 	return (strtoq(cp, endp, base));
 }
 
 static inline unsigned long
 simple_strtoul(const char *cp, char **endp, unsigned int base)
 {
 	return (strtoul(cp, endp, base));
 }
 
 static inline long
 simple_strtol(const char *cp, char **endp, unsigned int base)
 {
 	return (strtol(cp, endp, base));
 }
 
 static inline int
 kstrtoul(const char *cp, unsigned int base, unsigned long *res)
 {
 	char *end;
 
 	*res = strtoul(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	return (0);
 }
 
 static inline int
 kstrtol(const char *cp, unsigned int base, long *res)
 {
 	char *end;
 
 	*res = strtol(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	return (0);
 }
 
 static inline int
 kstrtoint(const char *cp, unsigned int base, int *res)
 {
 	char *end;
 	long temp;
 
 	*res = temp = strtol(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	if (temp != (int)temp)
 		return (-ERANGE);
 	return (0);
 }
 
 static inline int
 kstrtouint(const char *cp, unsigned int base, unsigned int *res)
 {
 	char *end;
 	unsigned long temp;
 
 	*res = temp = strtoul(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	if (temp != (unsigned int)temp)
 		return (-ERANGE);
 	return (0);
 }
 
 static inline int
 kstrtou16(const char *cp, unsigned int base, u16 *res)
 {
 	char *end;
 	unsigned long temp;
 
 	*res = temp = strtoul(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	if (temp != (u16)temp)
 		return (-ERANGE);
 	return (0);
 }
 
 static inline int
 kstrtou32(const char *cp, unsigned int base, u32 *res)
 {
 	char *end;
 	unsigned long temp;
 
 	*res = temp = strtoul(cp, &end, base);
 
 	/* skip newline character, if any */
 	if (*end == '\n')
 		end++;
 	if (*cp == 0 || *end != 0)
 		return (-EINVAL);
 	if (temp != (u32)temp)
 		return (-ERANGE);
 	return (0);
 }
 
 static inline int
 kstrtou64(const char *cp, unsigned int base, u64 *res)
 {
        char *end;
 
        *res = strtouq(cp, &end, base);
 
        /* skip newline character, if any */
        if (*end == '\n')
                end++;
        if (*cp == 0 || *end != 0)
                return (-EINVAL);
        return (0);
 }
 
 static inline int
 kstrtobool(const char *s, bool *res)
 {
 	int len;
 
 	if (s == NULL || (len = strlen(s)) == 0 || res == NULL)
 		return (-EINVAL);
 
 	/* skip newline character, if any */
 	if (s[len - 1] == '\n')
 		len--;
 
 	if (len == 1 && strchr("yY1", s[0]) != NULL)
 		*res = true;
 	else if (len == 1 && strchr("nN0", s[0]) != NULL)
 		*res = false;
 	else if (strncasecmp("on", s, len) == 0)
 		*res = true;
 	else if (strncasecmp("off", s, len) == 0)
 		*res = false;
 	else
 		return (-EINVAL);
 
 	return (0);
 }
 
 static inline int
 kstrtobool_from_user(const char __user *s, size_t count, bool *res)
 {
 	char buf[8] = {};
 
 	if (count > (sizeof(buf) - 1))
 		count = (sizeof(buf) - 1);
 
 	if (copy_from_user(buf, s, count))
 		return (-EFAULT);
 
 	return (kstrtobool(buf, res));
 }
 
 #define min(x, y)	((x) < (y) ? (x) : (y))
 #define max(x, y)	((x) > (y) ? (x) : (y))
 
 #define min3(a, b, c)	min(a, min(b,c))
 #define max3(a, b, c)	max(a, max(b,c))
 
 #define	min_t(type, x, y) ({			\
 	type __min1 = (x);			\
 	type __min2 = (y);			\
 	__min1 < __min2 ? __min1 : __min2; })
 
 #define	max_t(type, x, y) ({			\
 	type __max1 = (x);			\
 	type __max2 = (y);			\
 	__max1 > __max2 ? __max1 : __max2; })
 
 #define offsetofend(t, m)	\
         (offsetof(t, m) + sizeof((((t *)0)->m)))
 
 #define clamp_t(type, _x, min, max)	min_t(type, max_t(type, _x, min), max)
 #define clamp(x, lo, hi)		min( max(x,lo), hi)
 #define	clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
 
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
  * as wide as the result!), and we want to evaluate the macro
  * arguments just once each.
  */
 #define __round_mask(x, y) ((__typeof__(x))((y)-1))
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
 #define	smp_processor_id()	PCPU_GET(cpuid)
 #define	num_possible_cpus()	mp_ncpus
 #define	num_online_cpus()	mp_ncpus
 
 #if defined(__i386__) || defined(__amd64__)
 extern bool linux_cpu_has_clflush;
 #define	cpu_has_clflush		linux_cpu_has_clflush
 #endif
 
 typedef struct pm_message {
 	int event;
 } pm_message_t;
 
 /* Swap values of a and b */
 #define swap(a, b) do {			\
 	typeof(a) _swap_tmp = a;	\
 	a = b;				\
 	b = _swap_tmp;			\
 } while (0)
 
 #define	DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))
 
 #define	DIV_ROUND_CLOSEST_ULL(x, divisor) ({		\
 	__typeof(divisor) __d = (divisor);		\
 	unsigned long long __ret = (x) + (__d) / 2;	\
 	__ret /= __d;					\
 	__ret;						\
 })
 
 static inline uintmax_t
 mult_frac(uintmax_t x, uintmax_t multiplier, uintmax_t divisor)
 {
 	uintmax_t q = (x / divisor);
 	uintmax_t r = (x % divisor);
 
 	return ((q * multiplier) + ((r * multiplier) / divisor));
 }
 
 static inline int64_t
 abs64(int64_t x)
 {
 	return (x < 0 ? -x : x);
 }
 
 typedef struct linux_ratelimit {
 	struct timeval lasttime;
 	int counter;
 } linux_ratelimit_t;
 
 static inline bool
 linux_ratelimited(linux_ratelimit_t *rl)
 {
 	return (ppsratecheck(&rl->lasttime, &rl->counter, 1));
 }
 
 #define	struct_size(ptr, field, num) ({ \
 	const size_t __size = offsetof(__typeof(*(ptr)), field); \
 	const size_t __max = (SIZE_MAX - __size) / sizeof((ptr)->field[0]); \
 	((num) > __max) ? SIZE_MAX : (__size + sizeof((ptr)->field[0]) * (num)); \
 })
 
 #define	__is_constexpr(x) \
 	__builtin_constant_p(x)
 
 /*
  * The is_signed() macro below returns true if the passed data type is
  * signed. Else false is returned.
  */
 #define	is_signed(datatype) (((datatype)-1 / (datatype)2) == (datatype)0)
 
 /*
  * The type_max() macro below returns the maxium positive value the
  * passed data type can hold.
  */
 #define	type_max(datatype) ( \
   (sizeof(datatype) >= 8) ? (is_signed(datatype) ? INT64_MAX : UINT64_MAX) : \
   (sizeof(datatype) >= 4) ? (is_signed(datatype) ? INT32_MAX : UINT32_MAX) : \
   (sizeof(datatype) >= 2) ? (is_signed(datatype) ? INT16_MAX : UINT16_MAX) : \
 			    (is_signed(datatype) ? INT8_MAX : UINT8_MAX) \
 )
 
 /*
  * The type_min() macro below returns the minimum value the passed
  * data type can hold. For unsigned types the minimum value is always
  * zero. For signed types it may vary.
  */
 #define	type_min(datatype) ( \
   (sizeof(datatype) >= 8) ? (is_signed(datatype) ? INT64_MIN : 0) : \
   (sizeof(datatype) >= 4) ? (is_signed(datatype) ? INT32_MIN : 0) : \
   (sizeof(datatype) >= 2) ? (is_signed(datatype) ? INT16_MIN : 0) : \
 			    (is_signed(datatype) ? INT8_MIN : 0) \
 )
 
 #define	TAINT_WARN	0
 #define	test_taint(x)	(0)
 
+/*
+ * Checking if an option is defined would be easy if we could do CPP inside CPP.
+ * The defined cases, whether -Dxxx or -Dxxx=1, are easy to deal with.  In either
+ * case the defined value is "1". A more general -Dxxx=<c> case will require
+ * more effort to deal with all possible "true" values. Hope we do not have
+ * to do this as well.
+ * The real problem is the undefined case.  To avoid this problem we do the
+ * concat/varargs trick: "yyy" ## xxx can make two arguments if xxx is "1"
+ * by having a #define for yyy_1 which is "ignore,".
+ * Otherwise we will just get "yyy".
+ * Need to be careful about variable substitutions in macros though.
+ * This way we make a (true, false) problem a (don't care, true, false) or a
+ * (don't care true, false).  Then we can use a variadic macro to only select
+ * the always well known and defined argument #2.  And that seems to be
+ * exactly what we need.  Use 1 for true and 0 for false to also allow
+ * #if IS_*() checks pre-compiler checks which do not like #if true.
+ */
+#define ___XAB_1		dontcare,
+#define ___IS_XAB(_ignore, _x, ...)	(_x)
+#define	__IS_XAB(_x)		___IS_XAB(_x 1, 0)
+#define	_IS_XAB(_x)		__IS_XAB(__CONCAT(___XAB_, _x))
+
+/* This is if CONFIG_ccc=y. */
+#define	IS_BUILTIN(_x)		_IS_XAB(_x)
+/* This is if CONFIG_ccc=m. */
+#define	IS_MODULE(_x)		_IS_XAB(_x ## _MODULE)
+/* This is if CONFIG_ccc is compiled in(=y) or a module(=m). */
+#define	IS_ENABLED(_x)		(IS_BUILTIN(_x) || IS_MODULE(_x))
+/*
+ * This is a weird case.  If the CONFIG_ccc is builtin (=y) this returns true;
+ * or if the CONFIG_ccc is a module (=m) and the caller is built as a module
+ * (-DMODULE defined) this returns true, but if the caller is not a module
+ * (-DMODULE not defined, which means caller is BUILTIN) then it returns
+ * false.  In other words, a module can reach the kernel, a module can reach
+ * a module, but the kernel cannot reach a module, and code never compiled
+ * cannot be reached either.
+ * XXX -- I'd hope the module-to-module case would be handled by a proper
+ * module dependency definition (MODULE_DEPEND() in FreeBSD).
+ */
+#define	IS_REACHABLE(_x)	(IS_BUILTIN(_x) || \
+				    (IS_MODULE(_x) && IS_BUILTIN(MODULE)))
+
 #endif	/* _LINUX_KERNEL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/kobject.h b/sys/compat/linuxkpi/common/include/linux/kobject.h
index bd9e1c4ec6f5..403ec1495c32 100644
--- a/sys/compat/linuxkpi/common/include/linux/kobject.h
+++ b/sys/compat/linuxkpi/common/include/linux/kobject.h
@@ -1,154 +1,168 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 #ifndef	_LINUX_KOBJECT_H_
 #define	_LINUX_KOBJECT_H_
 
 #include <machine/stdarg.h>
 
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
 struct kobject;
 struct sysctl_oid;
 
+#define	KOBJ_CHANGE		0x01
+
 struct kobj_type {
 	void (*release)(struct kobject *kobj);
 	const struct sysfs_ops *sysfs_ops;
 	struct attribute **default_attrs;
 };
 
 extern const struct kobj_type linux_kfree_type;
 
 struct kobject {
 	struct kobject		*parent;
 	char			*name;
 	struct kref		kref;
 	const struct kobj_type	*ktype;
 	struct list_head	entry;
 	struct sysctl_oid	*oidp;
 };
 
 extern struct kobject *mm_kobj;
 
 struct attribute {
 	const char	*name;
 	struct module	*owner;
 	mode_t		mode;
 };
 
 struct kobj_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
 	    char *buf);
 	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
 	    const char *buf, size_t count);
 };
 
 static inline void
 kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
 {
 
 	kref_init(&kobj->kref);
 	INIT_LIST_HEAD(&kobj->entry);
 	kobj->ktype = ktype;
 	kobj->oidp = NULL;
 }
 
 void linux_kobject_release(struct kref *kref);
 
 static inline void
 kobject_put(struct kobject *kobj)
 {
 
 	if (kobj)
 		kref_put(&kobj->kref, linux_kobject_release);
 }
 
 static inline struct kobject *
 kobject_get(struct kobject *kobj)
 {
 
 	if (kobj)
 		kref_get(&kobj->kref);
 	return kobj;
 }
 
 int	kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list);
 int	kobject_add(struct kobject *kobj, struct kobject *parent,
 	    const char *fmt, ...);
 
 static inline struct kobject *
 kobject_create(void)
 {
 	struct kobject *kobj;
 
 	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
 	if (kobj == NULL)
 		return (NULL);
 	kobject_init(kobj, &linux_kfree_type);
 
 	return (kobj);
 }
 
 static inline struct kobject *
 kobject_create_and_add(const char *name, struct kobject *parent)
 {
 	struct kobject *kobj;
 
 	kobj = kobject_create();
 	if (kobj == NULL)
 		return (NULL);
 	if (kobject_add(kobj, parent, "%s", name) == 0)
 		return (kobj);
 	kobject_put(kobj);
 
 	return (NULL);
 }
 
 static inline void
 kobject_del(struct kobject *kobj __unused)
 {
 }
 
 static inline char *
 kobject_name(const struct kobject *kobj)
 {
 
 	return kobj->name;
 }
 
 int	kobject_set_name(struct kobject *kobj, const char *fmt, ...);
 int	kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
 	    struct kobject *parent, const char *fmt, ...);
 
+static __inline void
+kobject_uevent_env(struct kobject *kobj, int action, char *envp[])
+{
+
+	/*
+	 * iwlwifi(4) sends an INACCESSIBLE event when it detects that the card
+	 * (PCIe endpoint) is gone and it attempts a removal cleanup.
+	 * Not sure if we do anything related to udev/sysfs at the moment or
+	 * need a shortcut or simply ignore it (for now).
+	 */
+}
+
 #endif /* _LINUX_KOBJECT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/lockdep.h b/sys/compat/linuxkpi/common/include/linux/lockdep.h
index d2b3d4485dde..a86157ba5924 100644
--- a/sys/compat/linuxkpi/common/include/linux/lockdep.h
+++ b/sys/compat/linuxkpi/common/include/linux/lockdep.h
@@ -1,87 +1,88 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _LINUX_LOCKDEP_H_
 #define	_LINUX_LOCKDEP_H_
 
 #include <sys/lock.h>
 
 struct lock_class_key {
 };
 
 #define	lockdep_set_class(lock, key)
 #define	lockdep_set_subclass(lock, sub)
 #define	lockdep_set_class_and_name(lock, key, name)
 #define	lockdep_set_current_reclaim_state(g) do { } while (0)
 #define	lockdep_clear_current_reclaim_state() do { } while (0)
+#define	lockdep_init_map(_map, _name, _key, _x) do { } while(0)
 
 #ifdef INVARIANTS
 #define	lockdep_assert_held(m) do {					\
 	struct lock_object *__lock = (struct lock_object *)(m);		\
 	LOCK_CLASS(__lock)->lc_assert(__lock, LA_LOCKED);		\
 } while (0)
 
 #define	lockdep_assert_held_once(m) do {				\
 	struct lock_object *__lock = (struct lock_object *)(m);		\
 	LOCK_CLASS(__lock)->lc_assert(__lock, LA_LOCKED | LA_NOTRECURSED); \
 } while (0)
 
 static __inline bool
 lockdep_is_held(void *__m)
 {
 	struct lock_object *__lock;
 	struct thread *__td;
 
 	__lock = __m;
 	return (LOCK_CLASS(__lock)->lc_owner(__lock, &__td) != 0);
 }
 #define	lockdep_is_held_type(_m, _t) lockdep_is_held(_m)
 
 #else
 #define	lockdep_assert_held(m) do { } while (0)
 
 #define	lockdep_assert_held_once(m) do { } while (0)
 
 #define	lockdep_is_held(m)	1
 #define	lockdep_is_held_type(_m, _t)	1
 #endif
 
 #define	might_lock(m)	do { } while (0)
 #define	might_lock_read(m) do { } while (0)
 
 #define	lock_acquire(...) do { } while (0)
 #define	lock_release(...) do { } while (0)
 #define	lock_acquire_shared_recursive(...) do { } while (0)
 
 #define	mutex_acquire(...) do { } while (0)
 #define	mutex_release(...) do { } while (0)
 
 #endif /* _LINUX_LOCKDEP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pm.h b/sys/compat/linuxkpi/common/include/linux/pm.h
new file mode 100644
index 000000000000..6b8a7e768a8c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pm.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_LINUXKPI_LINUX_PM_H
+#define	_LINUXKPI_LINUX_PM_H
+
+#ifdef CONFIG_PM_SLEEP
+#define	SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc)	\
+const struct dev_pm_ops _name = {				\
+        .suspend	= _suspendfunc,				\
+        .resume		= _resumefunc,				\
+        .freeze		= _suspendfunc,				\
+        .thaw		= _resumefunc,				\
+        .poweroff	= _suspendfunc,				\
+        .restore	= _resumefunc,				\
+}
+#else
+#define	SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc)	\
+const struct dev_pm_ops _name = {				\
+}
+#endif
+
+#endif	/* _LINUXKPI_LINUX_PM_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/scatterlist.h b/sys/compat/linuxkpi/common/include/linux/scatterlist.h
index 9104cb8dd78a..ebf0632f6f58 100644
--- a/sys/compat/linuxkpi/common/include/linux/scatterlist.h
+++ b/sys/compat/linuxkpi/common/include/linux/scatterlist.h
@@ -1,482 +1,536 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 #ifndef	_LINUX_SCATTERLIST_H_
 #define	_LINUX_SCATTERLIST_H_
 
+#include <sys/types.h>
+#include <sys/sf_buf.h>
+
 #include <linux/page.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 
struct bus_dmamap;
struct scatterlist {
	/* Page pointer, with the two low bits used as flag bits. */
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL	/* Entry chains to another list. */
#define	SG_PAGE_LINK_LAST	0x2UL	/* Entry terminates the list. */
#define	SG_PAGE_LINK_MASK	0x3UL	/* Mask covering both flag bits. */
	unsigned int offset;		/* Byte offset into the page. */
	unsigned int length;		/* Segment length in bytes. */
	dma_addr_t dma_address;		/* Mapped DMA address. */
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

/*
 * sg_chain() stores a struct scatterlist pointer in page_link, so the
 * structure size must keep the low flag bits clear for such pointers.
 */
CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
 
 struct sg_table {
 	struct scatterlist *sgl;
 	unsigned int nents;
 	unsigned int orig_nents;
 };
 
 struct sg_page_iter {
 	struct scatterlist *sg;
 	unsigned int sg_pgoffset;
 	unsigned int maxents;
 	struct {
 		unsigned int nents;
 		int	pg_advance;
 	} internal;
 };
 
 struct sg_dma_page_iter {
 	struct sg_page_iter base;
 };
 
/* Largest segment length representable; rounded down to a page multiple. */
#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

/* Number of sg entries that fit in exactly one page of backing store. */
#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

/* Test/extract the flag bits and chain pointer encoded in page_link. */
#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length

/* Iterate page-by-page (at most nents entries), or entry-by-entry. */
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))
#define	for_each_sg_dma_page(sgl, iter, nents, pgoffset) 		\
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

/* Allocator/deallocator callback types used by __sg_alloc_table(). */
typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);
 
 static inline void
 sg_assign_page(struct scatterlist *sg, struct page *page)
 {
 	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;
 
 	sg->page_link = page_link | (unsigned long)page;
 }
 
 static inline void
 sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
     unsigned int offset)
 {
 	sg_assign_page(sg, page);
 	sg->offset = offset;
 	sg->length = len;
 }
 
 static inline struct page *
 sg_page(struct scatterlist *sg)
 {
 	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
 }
 
 static inline void
 sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
 {
 	sg_set_page(sg, virt_to_page(buf), buflen,
 	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
 }
 
 static inline struct scatterlist *
 sg_next(struct scatterlist *sg)
 {
 	if (sg_is_last(sg))
 		return (NULL);
 	sg++;
 	if (sg_is_chain(sg))
 		sg = sg_chain_ptr(sg);
 	return (sg);
 }
 
 static inline vm_paddr_t
 sg_phys(struct scatterlist *sg)
 {
 	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
 }
 
 static inline void *
 sg_virt(struct scatterlist *sg)
 {
 
 	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
 }
 
 static inline void
 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
     struct scatterlist *sgl)
 {
 	struct scatterlist *sg = &prv[prv_nents - 1];
 
 	sg->offset = 0;
 	sg->length = 0;
 	sg->page_link = ((unsigned long)sgl |
 	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
 }
 
 static inline void
 sg_mark_end(struct scatterlist *sg)
 {
 	sg->page_link |= SG_PAGE_LINK_LAST;
 	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
 }
 
 static inline void
 sg_init_table(struct scatterlist *sg, unsigned int nents)
 {
 	bzero(sg, sizeof(*sg) * nents);
 	sg_mark_end(&sg[nents - 1]);
 }
 
 static struct scatterlist *
 sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
 	if (nents == SG_MAX_SINGLE_ALLOC) {
 		return ((void *)__get_free_page(gfp_mask));
 	} else
 		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
 }
 
 static inline void
 sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
 	if (nents == SG_MAX_SINGLE_ALLOC) {
 		free_page((unsigned long)sg);
 	} else
 		kfree(sg);
 }
 
/*
 * Free a possibly chained scatterlist table.  Walks the chunks laid
 * out by __sg_alloc_table() (each chunk holds at most max_ents
 * entries, the last slot of a full chunk being a chain link) and
 * releases each through free_fn.  When skip_first_chunk is set, the
 * first chunk was supplied by the caller and is not freed here.
 */
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * A full chunk sacrificed its last slot to the chain
		 * pointer, so only max_ents - 1 slots carried payload.
		 * Grab the next chunk before freeing this one.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
 
 static inline void
 sg_free_table(struct sg_table *table)
 {
 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
 }
 
/*
 * Allocate a scatterlist table for "nents" entries in chunks of at
 * most max_ents entries, chaining the chunks together.  first_chunk,
 * if non-NULL, is consumed (once) instead of calling alloc_fn for the
 * initial chunk.  Returns 0 or a negative errno; on -ENOMEM a partial
 * table may remain for the caller to release via __sg_free_table().
 */
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		/*
		 * A full chunk sacrifices its last slot for the chain
		 * pointer, so it carries max_ents - 1 payload entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Re-count the previous chunk's chain slot so
			 * that __sg_free_table() walks and frees every
			 * chunk allocated so far.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/* Link this chunk to its predecessor, or start the table. */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
 
 static inline int
 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 {
 	int ret;
 
 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
 	    NULL, gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
 
 	return (ret);
 }
 
/*
 * Build a scatterlist table describing "count" pages, starting "off"
 * bytes into the first page and covering "size" bytes in total.
 * Physically contiguous pages are coalesced into single entries of at
 * most max_segment bytes (which must be a non-zero page multiple).
 * Returns 0 or a negative errno.
 */
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
		return (-EINVAL);

	/* First pass: count how many segments will be needed. */
	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
		return (rc);

	/* Second pass: fill each entry with a maximal contiguous run. */
	cur = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		/* Find where this run of contiguous pages ends. */
		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		/* "off" applies only to the first segment. */
		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	return (0);
}
 
/*
 * Convenience wrapper around __sg_alloc_table_from_pages() using the
 * largest supported segment size.
 */
static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
}
 
 static inline int
 sg_nents(struct scatterlist *sg)
 {
 	int nents;
 
 	for (nents = 0; sg; sg = sg_next(sg))
 		nents++;
 	return (nents);
 }
 
 static inline void
 __sg_page_iter_start(struct sg_page_iter *piter,
     struct scatterlist *sglist, unsigned int nents,
     unsigned long pgoffset)
 {
 	piter->internal.pg_advance = 0;
 	piter->internal.nents = nents;
 
 	piter->sg = sglist;
 	piter->sg_pgoffset = pgoffset;
 }
 
/*
 * Advance a for_each_sg_page() iterator by one page, crossing into
 * following entries once the current one is exhausted.  iter->sg goes
 * NULL when the chain runs out, which terminates the caller's loop.
 *
 * NOTE(review): if maxents reaches 0 while sg is still non-NULL, the
 * loop condition ((iter)->sg) does not terminate -- verify that nents
 * passed by callers always matches the chain length.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	/* Pages spanned by this entry, rounding the tail up. */
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
 
/*
 * Number of pages spanned by this entry, accounting for the in-page
 * offset and rounding the tail up to a full page.
 */
static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
#define	sg_dma_page_count(sg) \
	sg_page_count(sg)
 
/*
 * Advance an iterator primed by __sg_page_iter_start() to the next
 * page.  Returns 1 while a page is available, 0 once the entry count
 * or the chain itself is exhausted.
 */
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	/* pg_advance is 0 on the first call, 1 on every later call. */
	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		/* Current entry exhausted; carry over into the next one. */
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}
#define	__sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)
 
/*
 * Initialize an iterator for for_each_sg_page().  The page offset is
 * primed one page early because _sg_iter_next() pre-increments; the
 * immediate call below lands the iterator exactly on "pgoffset".
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/* Empty list: leave the iterator already terminated. */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}
 
 /*
  * sg_page_iter_dma_address() is implemented as a macro because it
  * needs to accept two different and identical structure types. This
  * allows both old and new code to co-exist. The compile time assert
  * adds some safety, that the structure sizes match.
  */
 #define	sg_page_iter_dma_address(spi) ({		\
 	struct sg_page_iter *__spi = (void *)(spi);	\
 	dma_addr_t __dma_address;			\
 	CTASSERT(sizeof(*(spi)) == sizeof(*__spi));	\
 	__dma_address = __spi->sg->dma_address +	\
 	    (__spi->sg_pgoffset << PAGE_SHIFT);		\
 	__dma_address;					\
 })
 
 static inline struct page *
 sg_page_iter_page(struct sg_page_iter *piter)
 {
 	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
 }
 
/*
 * Copy up to buflen bytes from buf into the scatterlist, starting
 * skip bytes into the list.  Pages are mapped one at a time through
 * sf_buf; the copy stops early (returning a short count) if a mapping
 * cannot be obtained without sleeping.  Returns the bytes copied.
 */
static __inline size_t
sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen, off_t skip)
{
	struct sg_page_iter piter;
	struct page *page;
	struct sf_buf *sf;
	size_t len, copied;
	char *p, *b;

	if (buflen == 0)
		return (0);

	b = __DECONST(char *, buf);
	copied = 0;
	/* sf_buf with SFB_CPUPRIVATE requires staying on this CPU. */
	sched_pin();
	for_each_sg_page(sgl, &piter, nents, 0) {

		/* Skip to the start. */
		if (piter.sg->length <= skip) {
			skip -= piter.sg->length;
			continue;
		}

		/* See how much to copy. */
		KASSERT(((piter.sg->length - skip) != 0 && (buflen != 0)),
		    ("%s: sg len %u - skip %ju || buflen %zu is 0\n",
		    __func__, piter.sg->length, (uintmax_t)skip, buflen));
		len = min(piter.sg->length - skip, buflen);

		page = sg_page_iter_page(&piter);
		sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
		if (sf == NULL)
			break;
		/*
		 * NOTE(review): sg_pgoffset counts whole pages, yet it
		 * is added to the KVA as a byte offset, and sg->offset
		 * is not applied -- presumably callers only pass
		 * page-aligned, single-page entries; confirm.
		 */
		p = (char *)sf_buf_kva(sf) + piter.sg_pgoffset + skip;
		memcpy(p, b, len);
		sf_buf_free(sf);

		copied += len;
		/* Either we exactly filled the page, or we are done. */
		buflen -= len;
		if (buflen == 0)
			break;
		/*
		 * NOTE(review): after the first partial copy, skip can
		 * go negative here rather than being clamped to 0 --
		 * verify against the multi-entry/non-zero-skip case.
		 */
		skip -= len;
		b += len;
	}
	sched_unpin();

	return (copied);
}
+
 #endif					/* _LINUX_SCATTERLIST_H_ */