diff --git a/include/os/freebsd/spl/sys/kmem.h b/include/os/freebsd/spl/sys/kmem.h index 27d290863c0b..c633799318d5 100644 --- a/include/os/freebsd/spl/sys/kmem.h +++ b/include/os/freebsd/spl/sys/kmem.h @@ -1,113 +1,114 @@ /* * Copyright (c) 2007 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _OPENSOLARIS_SYS_KMEM_H_ #define _OPENSOLARIS_SYS_KMEM_H_ #ifdef _KERNEL #include #include #include #include #include #include #include MALLOC_DECLARE(M_SOLARIS); #define POINTER_IS_VALID(p) (!((uintptr_t)(p) & 0x3)) #define POINTER_INVALIDATE(pp) (*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1)) #define KM_SLEEP M_WAITOK #define KM_PUSHPAGE M_WAITOK #define KM_NOSLEEP M_NOWAIT #define KM_NORMALPRI 0 #define KMC_NODEBUG UMA_ZONE_NODUMP typedef struct vmem vmem_t; extern char *kmem_asprintf(const char *, ...) 
__attribute__((format(printf, 1, 2))); extern char *kmem_vasprintf(const char *fmt, va_list ap) __attribute__((format(printf, 1, 0))); extern int kmem_scnprintf(char *restrict str, size_t size, const char *restrict fmt, ...); typedef struct kmem_cache { char kc_name[32]; #if !defined(KMEM_DEBUG) uma_zone_t kc_zone; #else size_t kc_size; #endif int (*kc_constructor)(void *, void *, int); void (*kc_destructor)(void *, void *); void *kc_private; } kmem_cache_t; extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache); extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache); -__attribute__((alloc_size(1))) +__attribute__((malloc, alloc_size(1))) void *zfs_kmem_alloc(size_t size, int kmflags); void zfs_kmem_free(void *buf, size_t size); uint64_t kmem_size(void); kmem_cache_t *kmem_cache_create(const char *name, size_t bufsize, size_t align, int (*constructor)(void *, void *, int), void (*destructor)(void *, void *), void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags); void kmem_cache_destroy(kmem_cache_t *cache); +__attribute__((malloc)) void *kmem_cache_alloc(kmem_cache_t *cache, int flags); void kmem_cache_free(kmem_cache_t *cache, void *buf); boolean_t kmem_cache_reap_active(void); void kmem_cache_reap_soon(kmem_cache_t *); void kmem_reap(void); int kmem_debugging(void); void *calloc(size_t n, size_t s); #define kmem_cache_reap_now kmem_cache_reap_soon #define freemem vm_free_count() #define minfree vm_cnt.v_free_min #define kmem_alloc(size, kmflags) zfs_kmem_alloc((size), (kmflags)) #define kmem_zalloc(size, kmflags) \ zfs_kmem_alloc((size), (kmflags) | M_ZERO) #define kmem_free(buf, size) zfs_kmem_free((buf), (size)) #endif /* _KERNEL */ #ifdef _STANDALONE /* * At the moment, we just need it for the type. We redirect the alloc/free * routines to the usual Free and Malloc in that environment. */ typedef int kmem_cache_t; #endif /* _STANDALONE */ #endif /* _OPENSOLARIS_SYS_KMEM_H_ */ diff --git a/include/os/linux/spl/sys/kmem.h b/include/os/linux/spl/sys/kmem.h index 594425f7b297..8a203f7bb8e2 100644 --- a/include/os/linux/spl/sys/kmem.h +++ b/include/os/linux/spl/sys/kmem.h @@ -1,219 +1,219 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. * * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . */ #ifndef _SPL_KMEM_H #define _SPL_KMEM_H #include #include #include #include #include extern int kmem_debugging(void); -extern char *kmem_vasprintf(const char *fmt, va_list ap) - __attribute__((format(printf, 1, 0))); -extern char *kmem_asprintf(const char *fmt, ...) 
- __attribute__((format(printf, 1, 2))); +__attribute__((format(printf, 1, 0))) +extern char *kmem_vasprintf(const char *fmt, va_list ap); +__attribute__((format(printf, 1, 2))) +extern char *kmem_asprintf(const char *fmt, ...); extern char *kmem_strdup(const char *str); extern void kmem_strfree(char *str); #define kmem_scnprintf scnprintf #define POINTER_IS_VALID(p) (!((uintptr_t)(p) & 0x3)) #define POINTER_INVALIDATE(pp) (*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1)) /* * Memory allocation interfaces */ #define KM_SLEEP 0x0000 /* can block for memory; success guaranteed */ #define KM_NOSLEEP 0x0001 /* cannot block for memory; may fail */ #define KM_PUSHPAGE 0x0004 /* can block for memory; may use reserve */ #define KM_ZERO 0x1000 /* zero the allocation */ #define KM_VMEM 0x2000 /* caller is vmem_* wrapper */ #define KM_PUBLIC_MASK (KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE) static int spl_fstrans_check(void); void *spl_kvmalloc(size_t size, gfp_t flags); /* * Convert a KM_* flags mask to its Linux GFP_* counterpart. The conversion * function is context aware which means that KM_SLEEP allocations can be * safely used in syncing contexts which have set PF_FSTRANS. */ static inline gfp_t kmem_flags_convert(int flags) { gfp_t lflags = __GFP_NOWARN | __GFP_COMP; if (flags & KM_NOSLEEP) { lflags |= GFP_ATOMIC | __GFP_NORETRY; } else { lflags |= GFP_KERNEL; if (spl_fstrans_check()) lflags &= ~(__GFP_IO|__GFP_FS); } if (flags & KM_PUSHPAGE) lflags |= __GFP_HIGH; if (flags & KM_ZERO) lflags |= __GFP_ZERO; return (lflags); } typedef struct { struct task_struct *fstrans_thread; unsigned int saved_flags; } fstrans_cookie_t; /* * Introduced in Linux 3.9, however this cannot be solely relied on before * Linux 3.18 as it doesn't turn off __GFP_FS as it should. */ #ifdef PF_MEMALLOC_NOIO #define __SPL_PF_MEMALLOC_NOIO (PF_MEMALLOC_NOIO) #else #define __SPL_PF_MEMALLOC_NOIO (0) #endif /* * PF_FSTRANS is removed from Linux 4.12 */ #ifdef PF_FSTRANS #define __SPL_PF_FSTRANS (PF_FSTRANS) #else #define __SPL_PF_FSTRANS (0) #endif #define SPL_FSTRANS (__SPL_PF_FSTRANS|__SPL_PF_MEMALLOC_NOIO) static inline fstrans_cookie_t spl_fstrans_mark(void) { fstrans_cookie_t cookie; BUILD_BUG_ON(SPL_FSTRANS == 0); cookie.fstrans_thread = current; cookie.saved_flags = current->flags & SPL_FSTRANS; current->flags |= SPL_FSTRANS; return (cookie); } static inline void spl_fstrans_unmark(fstrans_cookie_t cookie) { ASSERT3P(cookie.fstrans_thread, ==, current); ASSERT((current->flags & SPL_FSTRANS) == SPL_FSTRANS); current->flags &= ~SPL_FSTRANS; current->flags |= cookie.saved_flags; } static inline int spl_fstrans_check(void) { return (current->flags & SPL_FSTRANS); } /* * specifically used to check PF_FSTRANS flag, cannot be relied on for * checking spl_fstrans_mark(). 
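Because kmem_flags_convert() keys off the current task's SPL_FSTRANS flags, callers that must not recurse into the filesystem bracket their work with the cookie API declared above. A minimal hedged sketch of that pattern follows (the sync_ctx_alloc() wrapper is hypothetical and not part of this change):

#include <sys/kmem.h>

/*
 * Hypothetical caller on a syncing path: while the task is marked,
 * kmem_flags_convert() strips __GFP_IO|__GFP_FS from the KM_SLEEP
 * allocation, so direct reclaim cannot re-enter the filesystem.
 */
static void *
sync_ctx_alloc(size_t size)
{
	fstrans_cookie_t cookie = spl_fstrans_mark();
	void *buf = kmem_alloc(size, KM_SLEEP);

	spl_fstrans_unmark(cookie);
	return (buf);
}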
*/ static inline int __spl_pf_fstrans_check(void) { return (current->flags & __SPL_PF_FSTRANS); } /* * Kernel compatibility for GFP flags */ /* < 4.13 */ #ifndef __GFP_RETRY_MAYFAIL #define __GFP_RETRY_MAYFAIL __GFP_REPEAT #endif /* < 4.4 */ #ifndef __GFP_RECLAIM #define __GFP_RECLAIM __GFP_WAIT #endif #ifdef HAVE_ATOMIC64_T #define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used) #define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used) #define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used) #define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size) extern atomic64_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; #else /* HAVE_ATOMIC64_T */ #define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used) #define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used) #define kmem_alloc_used_read() atomic_read(&kmem_alloc_used) #define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size) extern atomic_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; #endif /* HAVE_ATOMIC64_T */ extern unsigned int spl_kmem_alloc_warn; extern unsigned int spl_kmem_alloc_max; #define kmem_alloc(sz, fl) spl_kmem_alloc((sz), (fl), __func__, __LINE__) #define kmem_zalloc(sz, fl) spl_kmem_zalloc((sz), (fl), __func__, __LINE__) #define kmem_free(ptr, sz) spl_kmem_free((ptr), (sz)) #define kmem_cache_reap_active spl_kmem_cache_reap_active -extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line) - __attribute__((alloc_size(1))); -extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line) - __attribute__((alloc_size(1))); +__attribute__((malloc, alloc_size(1))) +extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line); +__attribute__((malloc, alloc_size(1))) +extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line); extern void spl_kmem_free(const void *ptr, size_t sz); /* * 5.8 API change, pgprot_t argument removed. */ #ifdef HAVE_VMALLOC_PAGE_KERNEL #define spl_vmalloc(size, flags) __vmalloc(size, flags, PAGE_KERNEL) #else #define spl_vmalloc(size, flags) __vmalloc(size, flags) #endif /* * The following functions are only available for internal use. */ extern void *spl_kmem_alloc_impl(size_t size, int flags, int node); extern void *spl_kmem_alloc_debug(size_t size, int flags, int node); extern void *spl_kmem_alloc_track(size_t size, int flags, const char *func, int line, int node); extern void spl_kmem_free_impl(const void *buf, size_t size); extern void spl_kmem_free_debug(const void *buf, size_t size); extern void spl_kmem_free_track(const void *buf, size_t size); extern int spl_kmem_init(void); extern void spl_kmem_fini(void); extern int spl_kmem_cache_reap_active(void); #endif /* _SPL_KMEM_H */ diff --git a/include/os/linux/spl/sys/vmem.h b/include/os/linux/spl/sys/vmem.h index e77af2a7a48c..92585a17e263 100644 --- a/include/os/linux/spl/sys/vmem.h +++ b/include/os/linux/spl/sys/vmem.h @@ -1,101 +1,103 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. 
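Returning to the kmem_alloc()/kmem_zalloc() wrappers above: the macros splice in __func__ and __LINE__ so the tracked allocator variants can attribute every buffer to its call site, and the new malloc/alloc_size annotations on spl_kmem_alloc()/spl_kmem_zalloc() apply at each expansion. A hedged sketch of what a call expands to (the parse_config() caller is hypothetical):

#include <sys/kmem.h>

static int
parse_config(size_t len)
{
	/*
	 * Expands to spl_kmem_zalloc(len, KM_SLEEP, "parse_config", <line>).
	 * KM_SLEEP may block but is guaranteed to succeed, so no NULL check
	 * is needed, and alloc_size(1) tells the compiler the buffer is
	 * exactly len bytes.
	 */
	char *buf = kmem_zalloc(len, KM_SLEEP);

	/* ... fill and consume buf ... */

	kmem_free(buf, len);
	return (0);
}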
* * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . */ #ifndef _SPL_VMEM_H #define _SPL_VMEM_H #include #include #include typedef struct vmem { } vmem_t; /* * Memory allocation interfaces */ #define VMEM_ALLOC 0x01 #define VMEM_FREE 0x02 #ifndef VMALLOC_TOTAL #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) #endif /* * vmem_* is an interface to a low level arena-based memory allocator on * Illumos that is used to allocate virtual address space. The kmem SLAB * allocator allocates slabs from it. Then the generic allocation functions * kmem_{alloc,zalloc,free}() are layered on top of SLAB allocators. * * On Linux, the primary means of doing allocations is via kmalloc(), which * is similarly layered on top of something called the buddy allocator. The * buddy allocator is not available to kernel modules, it uses physical * memory addresses rather than virtual memory addresses and is prone to * fragmentation. * * Linux sets aside a relatively small address space for in-kernel virtual * memory from which allocations can be done using vmalloc(). It might seem * like a good idea to use vmalloc() to implement something similar to * Illumos' allocator. However, this has the following problems: * * 1. Page directory table allocations are hard coded to use GFP_KERNEL. * Consequently, any KM_PUSHPAGE or KM_NOSLEEP allocations done using * vmalloc() will not have proper semantics. * * 2. Address space exhaustion is a real issue on 32-bit platforms where * only a few 100MB are available. The kernel will handle it by spinning * when it runs out of address space. * * 3. All vmalloc() allocations and frees are protected by a single global * lock which serializes all allocations. * * 4. Accessing /proc/meminfo and /proc/vmallocinfo will iterate the entire * list. The former will sum the allocations while the latter will print * them to user space in a way that user space can keep the lock held * indefinitely. When the total number of mapped allocations is large * (several 100,000) a large amount of time will be spent waiting on locks. * * 5. Linux has a wait_on_bit() locking primitive that assumes physical * memory is used, it simply does not work on virtual memory. Certain * Linux structures (e.g. the superblock) use them and might be embedded * into a structure from Illumos. This makes using Linux virtual memory * unsafe in certain situations. * * It follows that we cannot obtain identical semantics to those on Illumos. * Consequently, we implement the kmem_{alloc,zalloc,free}() functions in * such a way that they can be used as drop-in replacements for small vmem_* * allocations (8MB in size or smaller) and map vmem_{alloc,zalloc,free}() * to them. 
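To make the drop-in mapping described above concrete, here is a hedged sketch of a caller using the vmem wrappers for a large, infrequent allocation (the alloc_scan_table()/free_scan_table() helpers are hypothetical):

#include <sys/kmem.h>
#include <sys/vmem.h>

/*
 * Hypothetical helper: a multi-megabyte table that is allocated rarely is
 * a reasonable fit for vmem_zalloc(), which may be satisfied from kernel
 * virtual address space when a physically contiguous allocation would be
 * likely to fail. The size must be remembered and handed back to
 * vmem_free().
 */
static void *
alloc_scan_table(size_t nentries, size_t entry_size)
{
	return (vmem_zalloc(nentries * entry_size, KM_SLEEP));
}

static void
free_scan_table(void *table, size_t nentries, size_t entry_size)
{
	vmem_free(table, nentries * entry_size);
}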
*/ #define vmem_alloc(sz, fl) spl_vmem_alloc((sz), (fl), __func__, __LINE__) #define vmem_zalloc(sz, fl) spl_vmem_zalloc((sz), (fl), __func__, __LINE__) #define vmem_free(ptr, sz) spl_vmem_free((ptr), (sz)) -extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line); -extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line); +extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line) + __attribute__((malloc, alloc_size(1))); +extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line) + __attribute__((malloc, alloc_size(1))); extern void spl_vmem_free(const void *ptr, size_t sz); int spl_vmem_init(void); void spl_vmem_fini(void); #endif /* _SPL_VMEM_H */ diff --git a/include/sys/abd.h b/include/sys/abd.h index 82c51cb05cbc..750f9986c1da 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -1,221 +1,226 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2014 by Chunwei Chen. All rights reserved. * Copyright (c) 2016, 2019 by Delphix. All rights reserved. */ #ifndef _ABD_H #define _ABD_H #include #include #include #include #ifdef __cplusplus extern "C" { #endif typedef enum abd_flags { ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */ ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */ ABD_FLAG_META = 1 << 2, /* does this represent FS metadata? 
*/ ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */ ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */ ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */ ABD_FLAG_GANG = 1 << 6, /* mult ABDs chained together */ ABD_FLAG_GANG_FREE = 1 << 7, /* gang ABD is responsible for mem */ ABD_FLAG_ZEROS = 1 << 8, /* ABD for zero-filled buffer */ ABD_FLAG_ALLOCD = 1 << 9, /* we allocated the abd_t */ } abd_flags_t; typedef struct abd { abd_flags_t abd_flags; uint_t abd_size; /* excludes scattered abd_offset */ list_node_t abd_gang_link; #ifdef ZFS_DEBUG struct abd *abd_parent; zfs_refcount_t abd_children; #endif kmutex_t abd_mtx; union { struct abd_scatter { uint_t abd_offset; #if defined(__FreeBSD__) && defined(_KERNEL) void *abd_chunks[1]; /* actually variable-length */ #else uint_t abd_nents; struct scatterlist *abd_sgl; #endif } abd_scatter; struct abd_linear { void *abd_buf; struct scatterlist *abd_sgl; /* for LINEAR_PAGE */ } abd_linear; struct abd_gang { list_t abd_gang_chain; } abd_gang; } abd_u; } abd_t; typedef int abd_iter_func_t(void *buf, size_t len, void *priv); typedef int abd_iter_func2_t(void *bufa, void *bufb, size_t len, void *priv); extern int zfs_abd_scatter_enabled; /* * Allocations and deallocations */ +__attribute__((malloc)) abd_t *abd_alloc(size_t, boolean_t); +__attribute__((malloc)) abd_t *abd_alloc_linear(size_t, boolean_t); +__attribute__((malloc)) abd_t *abd_alloc_gang(void); +__attribute__((malloc)) abd_t *abd_alloc_for_io(size_t, boolean_t); +__attribute__((malloc)) abd_t *abd_alloc_sametype(abd_t *, size_t); boolean_t abd_size_alloc_linear(size_t); void abd_gang_add(abd_t *, abd_t *, boolean_t); void abd_free(abd_t *); abd_t *abd_get_offset(abd_t *, size_t); abd_t *abd_get_offset_size(abd_t *, size_t, size_t); abd_t *abd_get_offset_struct(abd_t *, abd_t *, size_t, size_t); abd_t *abd_get_zeros(size_t); abd_t *abd_get_from_buf(void *, size_t); void abd_cache_reap_now(void); /* * Conversion to and from a normal buffer */ void *abd_to_buf(abd_t *); void *abd_borrow_buf(abd_t *, size_t); void *abd_borrow_buf_copy(abd_t *, size_t); void abd_return_buf(abd_t *, void *, size_t); void abd_return_buf_copy(abd_t *, void *, size_t); void abd_take_ownership_of_buf(abd_t *, boolean_t); void abd_release_ownership_of_buf(abd_t *); /* * ABD operations */ int abd_iterate_func(abd_t *, size_t, size_t, abd_iter_func_t *, void *); int abd_iterate_func2(abd_t *, abd_t *, size_t, size_t, size_t, abd_iter_func2_t *, void *); void abd_copy_off(abd_t *, abd_t *, size_t, size_t, size_t); void abd_copy_from_buf_off(abd_t *, const void *, size_t, size_t); void abd_copy_to_buf_off(void *, abd_t *, size_t, size_t); int abd_cmp(abd_t *, abd_t *); int abd_cmp_buf_off(abd_t *, const void *, size_t, size_t); void abd_zero_off(abd_t *, size_t, size_t); void abd_verify(abd_t *); void abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, ssize_t csize, ssize_t dsize, const unsigned parity, void (*func_raidz_gen)(void **, const void *, size_t, size_t)); void abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, ssize_t tsize, const unsigned parity, void (*func_raidz_rec)(void **t, const size_t tsize, void **c, const unsigned *mul), const unsigned *mul); /* * Wrappers for calls with offsets of 0 */ static inline void abd_copy(abd_t *dabd, abd_t *sabd, size_t size) { abd_copy_off(dabd, sabd, 0, 0, size); } static inline void abd_copy_from_buf(abd_t *abd, const void *buf, size_t size) { abd_copy_from_buf_off(abd, buf, 0, size); } static inline void 
abd_copy_to_buf(void* buf, abd_t *abd, size_t size) { abd_copy_to_buf_off(buf, abd, 0, size); } static inline int abd_cmp_buf(abd_t *abd, const void *buf, size_t size) { return (abd_cmp_buf_off(abd, buf, 0, size)); } static inline void abd_zero(abd_t *abd, size_t size) { abd_zero_off(abd, 0, size); } /* * ABD type check functions */ static inline boolean_t abd_is_linear(abd_t *abd) { return ((abd->abd_flags & ABD_FLAG_LINEAR) ? B_TRUE : B_FALSE); } static inline boolean_t abd_is_linear_page(abd_t *abd) { return ((abd->abd_flags & ABD_FLAG_LINEAR_PAGE) ? B_TRUE : B_FALSE); } static inline boolean_t abd_is_gang(abd_t *abd) { return ((abd->abd_flags & ABD_FLAG_GANG) ? B_TRUE : B_FALSE); } static inline uint_t abd_get_size(abd_t *abd) { return (abd->abd_size); } /* * Module lifecycle * Defined in each specific OS's abd_os.c */ void abd_init(void); void abd_fini(void); /* * Linux ABD bio functions */ #if defined(__linux__) && defined(_KERNEL) unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t); unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t); #endif #ifdef __cplusplus } #endif #endif /* _ABD_H */ diff --git a/lib/libspl/include/umem.h b/lib/libspl/include/umem.h index 77c216721253..9039212baf14 100644 --- a/lib/libspl/include/umem.h +++ b/lib/libspl/include/umem.h @@ -1,229 +1,230 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License, Version 1.0 only * (the "License"). You may not use this file except in compliance * with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2008 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #ifndef _LIBSPL_UMEM_H #define _LIBSPL_UMEM_H /* * XXX: We should use the real portable umem library if it is detected * at configure time. However, if the library is not available, we can * use a trivial malloc based implementation. This obviously impacts * performance, but unless you are using a full userspace build of zpool for * something other than ztest, you are likely not going to notice or care. 
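Returning to the ABD interfaces above: abd_alloc() and friends now carry the malloc attribute as well, and the zero-offset wrappers keep the common allocate/fill/consume cycle short. A hedged sketch (the wrap_buf() helper is hypothetical):

#include <sys/abd.h>

/*
 * Hypothetical helper: copy a caller-supplied buffer into a freshly
 * allocated ABD. B_FALSE requests a data (non-metadata) allocation; the
 * malloc attribute on abd_alloc() tells the compiler the returned abd_t
 * does not alias any existing object.
 */
static abd_t *
wrap_buf(const void *src, size_t size)
{
	abd_t *abd = abd_alloc(size, B_FALSE);

	abd_copy_from_buf(abd, src, size);
	return (abd);
}

/* The consumer eventually releases the buffer with abd_free(abd). */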
* * https://labs.omniti.com/trac/portableumem */ #include #include #include #include #ifdef __cplusplus extern "C" { #endif typedef void vmem_t; /* * Flags for umem_alloc/umem_free */ #define UMEM_DEFAULT 0x0000 /* normal -- may fail */ #define UMEM_NOFAIL 0x0100 /* Never fails */ /* * Flags for umem_cache_create() */ #define UMC_NODEBUG 0x00020000 #define UMEM_CACHE_NAMELEN 31 typedef int umem_nofail_callback_t(void); typedef int umem_constructor_t(void *, void *, int); typedef void umem_destructor_t(void *, void *); typedef void umem_reclaim_t(void *); typedef struct umem_cache { char cache_name[UMEM_CACHE_NAMELEN + 1]; size_t cache_bufsize; size_t cache_align; umem_constructor_t *cache_constructor; umem_destructor_t *cache_destructor; umem_reclaim_t *cache_reclaim; void *cache_private; void *cache_arena; int cache_cflags; } umem_cache_t; /* Prototypes for functions to provide defaults for umem envvars */ const char *_umem_debug_init(void); const char *_umem_options_init(void); const char *_umem_logging_init(void); -__attribute__((alloc_size(1))) +__attribute__((malloc, alloc_size(1))) static inline void * umem_alloc(size_t size, int flags) { void *ptr = NULL; do { ptr = malloc(size); } while (ptr == NULL && (flags & UMEM_NOFAIL)); return (ptr); } -__attribute__((alloc_size(1))) +__attribute__((malloc, alloc_size(1))) static inline void * umem_alloc_aligned(size_t size, size_t align, int flags) { void *ptr = NULL; int rc = EINVAL; do { rc = posix_memalign(&ptr, align, size); } while (rc == ENOMEM && (flags & UMEM_NOFAIL)); if (rc == EINVAL) { fprintf(stderr, "%s: invalid memory alignment (%zd)\n", __func__, align); if (flags & UMEM_NOFAIL) abort(); return (NULL); } return (ptr); } -__attribute__((alloc_size(1))) +__attribute__((malloc, alloc_size(1))) static inline void * umem_zalloc(size_t size, int flags) { void *ptr = NULL; ptr = umem_alloc(size, flags); if (ptr) memset(ptr, 0, size); return (ptr); } static inline void umem_free(const void *ptr, size_t size __maybe_unused) { free((void *)ptr); } /* * umem_free_aligned was added for supporting portability * with non-POSIX platforms that require a different free * to be used with aligned allocations. 
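One practical payoff of the malloc, alloc_size(1) annotations added above is that the compiler now knows the object size at each call site and can flag obvious overflows. A hedged userspace sketch, assuming GCC or Clang with warnings enabled (the frob() function is hypothetical):

#include <string.h>
#include <umem.h>

/*
 * Hypothetical buggy caller: with alloc_size(1) on umem_alloc(), the
 * compiler can see that buf is 16 bytes and may warn (for example via
 * -Wstringop-overflow or __builtin_object_size checks) about the 32-byte
 * memset below at build time.
 */
static void
frob(void)
{
	char *buf = umem_alloc(16, UMEM_DEFAULT);

	if (buf != NULL) {
		memset(buf, 0, 32);	/* out-of-bounds write the attribute helps expose */
		umem_free(buf, 16);
	}
}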
*/ static inline void umem_free_aligned(void *ptr, size_t size __maybe_unused) { #ifndef _WIN32 free((void *)ptr); #else _aligned_free(ptr); #endif } static inline void umem_nofail_callback(umem_nofail_callback_t *cb __maybe_unused) {} static inline umem_cache_t * umem_cache_create( const char *name, size_t bufsize, size_t align, umem_constructor_t *constructor, umem_destructor_t *destructor, umem_reclaim_t *reclaim, void *priv, void *vmp, int cflags) { umem_cache_t *cp; cp = (umem_cache_t *)umem_alloc(sizeof (umem_cache_t), UMEM_DEFAULT); if (cp) { strlcpy(cp->cache_name, name, UMEM_CACHE_NAMELEN); cp->cache_bufsize = bufsize; cp->cache_align = align; cp->cache_constructor = constructor; cp->cache_destructor = destructor; cp->cache_reclaim = reclaim; cp->cache_private = priv; cp->cache_arena = vmp; cp->cache_cflags = cflags; } return (cp); } static inline void umem_cache_destroy(umem_cache_t *cp) { umem_free(cp, sizeof (umem_cache_t)); } +__attribute__((malloc)) static inline void * umem_cache_alloc(umem_cache_t *cp, int flags) { void *ptr = NULL; if (cp->cache_align != 0) ptr = umem_alloc_aligned( cp->cache_bufsize, cp->cache_align, flags); else ptr = umem_alloc(cp->cache_bufsize, flags); if (ptr && cp->cache_constructor) cp->cache_constructor(ptr, cp->cache_private, UMEM_DEFAULT); return (ptr); } static inline void umem_cache_free(umem_cache_t *cp, void *ptr) { if (cp->cache_destructor) cp->cache_destructor(ptr, cp->cache_private); if (cp->cache_align != 0) umem_free_aligned(ptr, cp->cache_bufsize); else umem_free(ptr, cp->cache_bufsize); } static inline void umem_cache_reap_now(umem_cache_t *cp __maybe_unused) { } #ifdef __cplusplus } #endif #endif
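Finally, a hedged toy illustration (plain userspace C, not ZFS code) of what the bare malloc attribute asserts: the returned pointer does not alias any other live object, which lets the compiler avoid reloading values around stores through the fresh buffer.

#include <stdlib.h>

__attribute__((malloc, alloc_size(1)))
static void *
toy_alloc(size_t size)
{
	return (malloc(size));
}

/*
 * Because toy_alloc() is declared with the malloc attribute, the compiler
 * may assume the new buffer does not alias *len, so the final return can
 * reuse the value of *len read before the store through buf.
 */
size_t
zero_first_byte(size_t *len)
{
	char *buf = toy_alloc(*len);

	if (buf != NULL) {
		buf[0] = 0;
		free(buf);
	}
	return (*len);
}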