diff --git a/sys/compat/linuxkpi/common/include/linux/compat.h b/sys/compat/linuxkpi/common/include/linux/compat.h
index d1a02f612f42..8a5a6918bb7c 100644
--- a/sys/compat/linuxkpi/common/include/linux/compat.h
+++ b/sys/compat/linuxkpi/common/include/linux/compat.h
@@ -1,62 +1,68 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #ifndef	_LINUXKPI_LINUX_COMPAT_H_
 #define	_LINUXKPI_LINUX_COMPAT_H_

 #include
 #include
 #include

 struct domainset;
 struct thread;
 struct task_struct;

 extern int linux_alloc_current(struct thread *, int flags);
 extern void linux_free_current(struct task_struct *);
 extern struct domainset *linux_get_vm_domain_set(int node);

+#define	__current_unallocated(td)	\
+	__predict_false((td)->td_lkpi_task == NULL)
+
 static inline void
 linux_set_current(struct thread *td)
 {
-	if (__predict_false(td->td_lkpi_task == NULL))
+	if (__current_unallocated(td))
 		lkpi_alloc_current(td, M_WAITOK);
 }

 static inline int
 linux_set_current_flags(struct thread *td, int flags)
 {
-	if (__predict_false(td->td_lkpi_task == NULL))
+	if (__current_unallocated(td))
 		return (lkpi_alloc_current(td, flags));
 	return (0);
 }

 #define	compat_ptr(x)		((void *)(uintptr_t)x)
 #define	ptr_to_compat(x)	((uintptr_t)x)

+typedef void fpu_safe_exec_cb_t(void *ctx);
+void lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx);
+
 #endif	/* _LINUXKPI_LINUX_COMPAT_H_ */
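The lkpi_fpu_safe_exec() hook declared above takes a callback and an opaque context pointer. For illustration only (hypothetical names, not part of this patch), a caller that may be reached from inside a kernel_fpu_begin() section packages its sleepable work the same way the lkpi_kmalloc() wrapper added in linux_slab.c further down does:

/* Illustrative sketch only -- hypothetical helper, not part of the patch. */
struct example_ctx {
	size_t	size;
	void	*result;
};

static void
example_cb(void *arg)
{
	struct example_ctx *ec = arg;

	/* Runs with any enclosing FPU section temporarily left, so it may sleep. */
	ec->result = malloc(ec->size, M_KMALLOC, M_WAITOK);
}

static void *
example_alloc_sleepable(size_t size)
{
	struct example_ctx ec = { .size = size };

	lkpi_fpu_safe_exec(&example_cb, &ec);
	return (ec.result);
}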
diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
index 8557f831bb60..298306b6ea05 100644
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -1,228 +1,229 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #ifndef	_LINUXKPI_LINUX_SLAB_H_
 #define	_LINUXKPI_LINUX_SLAB_H_

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include

 MALLOC_DECLARE(M_KMALLOC);

+#define	kmalloc(size, flags)		lkpi_kmalloc(size, flags)
 #define	kvmalloc(size, flags)		kmalloc(size, flags)
 #define	kvzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
 #define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
 #define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
 #define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
 #define	kfree_const(ptr)		kfree(ptr)
 #define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
 #define	vfree(arg)			kfree(arg)
 #define	kvfree(arg)			kfree(arg)
 #define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
 #define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
 #define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
-#define	__kmalloc(...)			kmalloc(__VA_ARGS__)

 /*
  * Prefix some functions with linux_ to avoid namespace conflict
  * with the OpenSolaris code in the kernel.
  */
 #define	kmem_cache		linux_kmem_cache
 #define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
 #define	kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
 #define	kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
 #define	kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
 #define	kmem_cache_destroy(...)	linux_kmem_cache_destroy(__VA_ARGS__)
 #define	kmem_cache_shrink(x)	(0)

 #define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	    __alignof(struct __struct), (flags), NULL)

 typedef void linux_kmem_ctor_t (void *);

 struct linux_kmem_cache;

 #define	SLAB_HWCACHE_ALIGN	(1 << 0)
 #define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
 #define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

 #define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

 #define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

 /* drm-kmod 5.4 compat */
 #define	kfree_async(ptr)	kfree(ptr);

 #define	ZERO_SIZE_PTR		((void *)16)
 #define	ZERO_OR_NULL_PTR(x)	((x) == NULL || (x) == ZERO_SIZE_PTR)

 static inline gfp_t
 linux_check_m_flags(gfp_t flags)
 {
	const gfp_t m = M_NOWAIT | M_WAITOK;

	/* make sure either M_NOWAIT or M_WAITOK is set */
	if ((flags & m) == 0)
		flags |= M_NOWAIT;
	else if ((flags & m) == m)
		flags &= ~M_WAITOK;

	/* mask away LinuxKPI specific flags */
	return (flags & GFP_NATIVE_MASK);
 }

 static inline void *
-kmalloc(size_t size, gfp_t flags)
+__kmalloc(size_t size, gfp_t flags)
 {
	return (malloc(MAX(size, sizeof(struct llist_node)), M_KMALLOC,
	    linux_check_m_flags(flags)));
 }

 static inline void *
 kmalloc_node(size_t size, gfp_t flags, int node)
 {
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
 }

 static inline void *
 kcalloc(size_t n, size_t size, gfp_t flags)
 {
	flags |= __GFP_ZERO;
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 static inline void *
 kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 {
	flags |= __GFP_ZERO;
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
 }

 static inline void *
 __vmalloc(size_t size, gfp_t flags, int other)
 {
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 static inline void *
 __vmalloc_node(size_t size, gfp_t flags, int node)
 {
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
 }

 static inline void *
 vmalloc_32(size_t size)
 {
	return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
 }

 static inline void *
 kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 static inline void *
 kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
 {
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
 }

 static inline void *
 kvmalloc_array(size_t n, size_t size, gfp_t flags)
 {
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 static inline void *
 krealloc(void *ptr, size_t size, gfp_t flags)
 {
	return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 static inline void *
 krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
 {
	if (WOULD_OVERFLOW(n, size)) {
		return NULL;
	}

	return (realloc(ptr, n * size, M_KMALLOC, linux_check_m_flags(flags)));
 }

 extern void linux_kfree_async(void *);

 static inline void
 kfree(const void *ptr)
 {
	if (ZERO_OR_NULL_PTR(ptr))
		return;

	if (curthread->td_critnest != 0)
		linux_kfree_async(__DECONST(void *, ptr));
	else
		free(__DECONST(void *, ptr), M_KMALLOC);
 }

 static __inline void
 kfree_sensitive(const void *ptr)
 {
	if (ZERO_OR_NULL_PTR(ptr))
		return;

	zfree(__DECONST(void *, ptr), M_KMALLOC);
 }

 static inline size_t
 ksize(const void *ptr)
 {
	return (malloc_usable_size(ptr));
 }

+extern void *lkpi_kmalloc(size_t size, gfp_t flags);
 extern struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
     size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
 extern void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
 extern void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
 extern void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
 extern void linux_kmem_cache_destroy(struct linux_kmem_cache *);

 #endif	/* _LINUXKPI_LINUX_SLAB_H_ */
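With this header change, plain kmalloc()/kzalloc() calls now route through lkpi_kmalloc(), while __kmalloc() keeps the direct malloc(9) path. Consumers are otherwise unchanged; a hypothetical driver-side sketch, for illustration only (names invented, not part of this patch):

/* Hypothetical usage of the LinuxKPI slab wrappers. */
struct foo {
	int	a;
	int	b;
};

static struct kmem_cache *foo_cache;

static int
foo_init(void)
{
	/* Expands to linux_kmem_cache_create("foo", ...). */
	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
	return (foo_cache != NULL ? 0 : -ENOMEM);
}

static struct foo *
foo_get(void)
{
	/* Routed to lkpi_kmem_cache_zalloc(); GFP_KERNEL may sleep. */
	return (kmem_cache_zalloc(foo_cache, GFP_KERNEL));
}

static void
foo_put(struct foo *p)
{
	kmem_cache_free(foo_cache, p);
}

static void
foo_fini(void)
{
	kmem_cache_destroy(foo_cache);
}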
diff --git a/sys/compat/linuxkpi/common/src/linux_fpu.c b/sys/compat/linuxkpi/common/src/linux_fpu.c
index b26dce98774b..4e40a2b004bb 100644
--- a/sys/compat/linuxkpi/common/src/linux_fpu.c
+++ b/sys/compat/linuxkpi/common/src/linux_fpu.c
@@ -1,73 +1,99 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright (c) 2020 Val Packett
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include
 #include
 #include
 #include
+#include

 #include
 #include

-#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
+#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) || \
+    defined(__i386__) || defined(__powerpc64__)

 #include

 /*
  * Technically the Linux API isn't supposed to allow nesting sections
  * either, but currently used versions of GPU drivers rely on nesting
  * working, so we only enter the section on the outermost level.
  */

 void
 lkpi_kernel_fpu_begin(void)
 {
	if ((current->fpu_ctx_level)++ == 0)
		fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
 }

 void
 lkpi_kernel_fpu_end(void)
 {
	if (--(current->fpu_ctx_level) == 0)
		fpu_kern_leave(curthread, NULL);
 }

+void
+lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx)
+{
+	unsigned int save_fpu_level;
+
+	save_fpu_level =
+	    __current_unallocated(curthread) ? 0 : current->fpu_ctx_level;
+	if (__predict_false(save_fpu_level != 0)) {
+		current->fpu_ctx_level = 1;
+		kernel_fpu_end();
+	}
+	func(ctx);
+	if (__predict_false(save_fpu_level != 0)) {
+		kernel_fpu_begin();
+		current->fpu_ctx_level = save_fpu_level;
+	}
+}
+
 #else

 void
 lkpi_kernel_fpu_begin(void)
 {
 }

 void
 lkpi_kernel_fpu_end(void)
 {
 }

+void
+lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx)
+{
+	func(ctx);
+}
+
 #endif
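The comment in linux_fpu.c above notes that nested FPU sections are tolerated even though Linux does not officially allow them. A sketch of the call pattern this implies, for illustration only (hypothetical functions; kernel_fpu_begin()/kernel_fpu_end() are the Linux-facing names that map to the lkpi_*() implementations):

/*
 * Illustrative only. Only the outermost kernel_fpu_begin() enters
 * FPU_KERN_NOCTX, and only the matching outermost kernel_fpu_end()
 * leaves it; inner pairs just adjust fpu_ctx_level.
 */
static void
example_inner(void)
{
	kernel_fpu_begin();	/* fpu_ctx_level 1 -> 2, no fpu_kern_enter() */
	/* ... SIMD work ... */
	kernel_fpu_end();	/* fpu_ctx_level 2 -> 1, no fpu_kern_leave() */
}

static void
example_outer(void)
{
	kernel_fpu_begin();	/* fpu_ctx_level 0 -> 1, fpu_kern_enter() */
	example_inner();
	kernel_fpu_end();	/* fpu_ctx_level 1 -> 0, fpu_kern_leave() */
}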
diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c
index 72097c55f94c..68117d1c9fa7 100644
--- a/sys/compat/linuxkpi/common/src/linux_slab.c
+++ b/sys/compat/linuxkpi/common/src/linux_slab.c
@@ -1,227 +1,251 @@
 /*-
  * Copyright (c) 2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include

 struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
 };

 struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
 };

 #define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

 #define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

 static LLIST_HEAD(linux_kfree_async_list);

 static void	lkpi_kmem_cache_free_async_fn(void *, int);

 void *
 lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
 {
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
 }

 void *
 lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
 {
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
 }

 static int
 linux_kmem_ctor(void *mem, int size, void *arg, int flags)
 {
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
 }

 static void
 linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
 {
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
 }

 struct linux_kmem_cache *
 linux_kmem_cache_create(const char *name, size_t size, size_t align,
     unsigned flags, linux_kmem_ctor_t *ctor)
 {
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
 }

 static inline void
 lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
 {
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
 }

 static inline void
 lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
 {
	uma_zfree(c->cache_zone, m);
 }

 static void
 lkpi_kmem_cache_free_async_fn(void *context, int pending)
 {
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
 }

 static inline void
 lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
 {
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
 }

 void
 lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
 {
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
 }

 void
 linux_kmem_cache_destroy(struct linux_kmem_cache *c)
 {
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);

	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
 }

+struct lkpi_kmalloc_ctx {
+	size_t size;
+	gfp_t flags;
+	void *addr;
+};
+
+static void
+lkpi_kmalloc_cb(void *ctx)
+{
+	struct lkpi_kmalloc_ctx *lmc = ctx;
+
+	lmc->addr = __kmalloc(lmc->size, lmc->flags);
+}
+
+void *
+lkpi_kmalloc(size_t size, gfp_t flags)
+{
+	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };
+
+	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
+	return (lmc.addr);
+}
+
 static void
 linux_kfree_async_fn(void *context, int pending)
 {
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
 }

 static struct task linux_kfree_async_task =
     TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

 void
 linux_kfree_async(void *addr)
 {
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
 }
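Taken together, the change lets a Linux-style allocation be issued from inside an FPU section without malloc(9) running under FPU_KERN_NOCTX, where sleeping is not permitted: lkpi_kmalloc() leaves the section around the underlying __kmalloc() call and then restores the saved nesting level. A hypothetical caller, for illustration only:

/*
 * Illustrative only, not part of the patch.  The allocation inside the
 * FPU section is executed with the section temporarily left, so a
 * sleepable GFP_KERNEL request no longer runs under FPU_KERN_NOCTX.
 */
static void
example(void)
{
	void *p;

	kernel_fpu_begin();
	/* ... SIMD work ... */
	p = kmalloc(128, GFP_KERNEL);	/* lkpi_kmalloc() drops and re-enters the section */
	/* ... more SIMD work using p ... */
	kernel_fpu_end();

	kfree(p);
}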