diff --git a/module/os/linux/spl/spl-generic.c b/module/os/linux/spl/spl-generic.c index 5179100d1665..de91c44257aa 100644 --- a/module/os/linux/spl/spl-generic.c +++ b/module/os/linux/spl/spl-generic.c @@ -1,826 +1,834 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. * * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . * * Solaris Porting Layer (SPL) Generic Implementation. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include unsigned long spl_hostid = 0; EXPORT_SYMBOL(spl_hostid); /* CSTYLED */ module_param(spl_hostid, ulong, 0644); MODULE_PARM_DESC(spl_hostid, "The system hostid."); proc_t p0; EXPORT_SYMBOL(p0); /* * Xorshift Pseudo Random Number Generator based on work by Sebastiano Vigna * * "Further scramblings of Marsaglia's xorshift generators" * http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf * * random_get_pseudo_bytes() is an API function on Illumos whose sole purpose * is to provide bytes containing random numbers. It is mapped to /dev/urandom * on Illumos, which uses a "FIPS 186-2 algorithm". No user of the SPL's * random_get_pseudo_bytes() needs bytes that are of cryptographic quality, so * we can implement it using a fast PRNG that we seed using Linux' actual * equivalent to random_get_pseudo_bytes(). We do this by providing each CPU * with an independent seed so that all calls to random_get_pseudo_bytes() are * free of atomic instructions. * * A consequence of using a fast PRNG is that using random_get_pseudo_bytes() * to generate words larger than 128 bits will paradoxically be limited to * `2^128 - 1` possibilities. This is because we have a sequence of `2^128 - 1` * 128-bit words and selecting the first will implicitly select the second. If * a caller finds this behavior undesirable, random_get_bytes() should be used * instead. * * XXX: Linux interrupt handlers that trigger within the critical section * formed by `s[1] = xp[1];` and `xp[0] = s[0];` and call this function will * see the same numbers. Nothing in the code currently calls this in an * interrupt handler, so this is considered to be okay. If that becomes a * problem, we could create a set of per-cpu variables for interrupt handlers * and use them when in_interrupt() from linux/preempt_mask.h evaluates to * true. 
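*
* For illustration only (a minimal userspace sketch, not part of this
* patch; demo_next() and demo_fill() are hypothetical names), the
* byte-filling scheme used by random_get_pseudo_bytes() below can be
* modelled as:
*
*     #include <stdint.h>
*     #include <string.h>
*
*     static uint64_t demo_next(uint64_t *s)        // xorshift128+ step
*     {
*         uint64_t s1 = s[0];
*         const uint64_t s0 = s[1];
*         s[0] = s0;
*         s1 ^= s1 << 23;
*         s[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5);
*         return (s[1] + s0);
*     }
*
*     // Fill 'len' bytes from successive 64-bit generator outputs.
*     static void demo_fill(uint64_t *s, uint8_t *p, size_t len)
*     {
*         while (len) {
*             uint64_t w = demo_next(s);
*             size_t n = len < 8 ? len : 8;
*             memcpy(p, &w, n);
*             p += n;
*             len -= n;
*         }
*     }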
*/ void __percpu *spl_pseudo_entropy; /* * spl_rand_next()/spl_rand_jump() are copied from the following CC-0 licensed * file: * * http://xorshift.di.unimi.it/xorshift128plus.c */ static inline uint64_t spl_rand_next(uint64_t *s) { uint64_t s1 = s[0]; const uint64_t s0 = s[1]; s[0] = s0; s1 ^= s1 << 23; // a s[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c return (s[1] + s0); } static inline void spl_rand_jump(uint64_t *s) { static const uint64_t JUMP[] = { 0x8a5cd789635d2dff, 0x121fd2155c472f96 }; uint64_t s0 = 0; uint64_t s1 = 0; int i, b; for (i = 0; i < sizeof (JUMP) / sizeof (*JUMP); i++) for (b = 0; b < 64; b++) { if (JUMP[i] & 1ULL << b) { s0 ^= s[0]; s1 ^= s[1]; } (void) spl_rand_next(s); } s[0] = s0; s[1] = s1; } int random_get_pseudo_bytes(uint8_t *ptr, size_t len) { uint64_t *xp, s[2]; ASSERT(ptr); xp = get_cpu_ptr(spl_pseudo_entropy); s[0] = xp[0]; s[1] = xp[1]; while (len) { union { uint64_t ui64; uint8_t byte[sizeof (uint64_t)]; }entropy; int i = MIN(len, sizeof (uint64_t)); len -= i; entropy.ui64 = spl_rand_next(s); while (i--) *ptr++ = entropy.byte[i]; } xp[0] = s[0]; xp[1] = s[1]; put_cpu_ptr(spl_pseudo_entropy); return (0); } EXPORT_SYMBOL(random_get_pseudo_bytes); #if BITS_PER_LONG == 32 /* * Support 64/64 => 64 division on a 32-bit platform. While the kernel * provides a div64_u64() function for this we do not use it because the * implementation is flawed. There are cases which return incorrect * results as late as linux-2.6.35. Until this is fixed upstream the * spl must provide its own implementation. * * This implementation is a slightly modified version of the algorithm * proposed by the book 'Hacker's Delight'. The original source can be * found here and is available for use without restriction. * * http://www.hackersdelight.org/HDcode/newCode/divDouble.c */ /* * Calculate number of leading of zeros for a 64-bit value. */ static int nlz64(uint64_t x) { register int n = 0; if (x == 0) return (64); if (x <= 0x00000000FFFFFFFFULL) { n = n + 32; x = x << 32; } if (x <= 0x0000FFFFFFFFFFFFULL) { n = n + 16; x = x << 16; } if (x <= 0x00FFFFFFFFFFFFFFULL) { n = n + 8; x = x << 8; } if (x <= 0x0FFFFFFFFFFFFFFFULL) { n = n + 4; x = x << 4; } if (x <= 0x3FFFFFFFFFFFFFFFULL) { n = n + 2; x = x << 2; } if (x <= 0x7FFFFFFFFFFFFFFFULL) { n = n + 1; } return (n); } /* * Newer kernels have a div_u64() function but we define our own * to simplify portability between kernel versions. */ static inline uint64_t __div_u64(uint64_t u, uint32_t v) { (void) do_div(u, v); return (u); } /* * Turn off missing prototypes warning for these functions. They are * replacements for libgcc-provided functions and will never be called * directly. */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wmissing-prototypes" /* * Implementation of 64-bit unsigned division for 32-bit machines. * * First the procedure takes care of the case in which the divisor is a * 32-bit quantity. There are two subcases: (1) If the left half of the * dividend is less than the divisor, one execution of do_div() is all that * is required (overflow is not possible). (2) Otherwise it does two * divisions, using the grade school method. */ uint64_t __udivdi3(uint64_t u, uint64_t v) { uint64_t u0, u1, v1, q0, q1, k; int n; if (v >> 32 == 0) { // If v < 2**32: if (u >> 32 < v) { // If u/v cannot overflow, return (__div_u64(u, v)); // just do one division. } else { // If u/v would overflow: u1 = u >> 32; // Break u into two halves. u0 = u & 0xFFFFFFFF; q1 = __div_u64(u1, v); // First quotient digit. 
k = u1 - q1 * v; // First remainder, < v. u0 += (k << 32); q0 = __div_u64(u0, v); // Seconds quotient digit. return ((q1 << 32) + q0); } } else { // If v >= 2**32: n = nlz64(v); // 0 <= n <= 31. v1 = (v << n) >> 32; // Normalize divisor, MSB is 1. u1 = u >> 1; // To ensure no overflow. q1 = __div_u64(u1, v1); // Get quotient from q0 = (q1 << n) >> 31; // Undo normalization and // division of u by 2. if (q0 != 0) // Make q0 correct or q0 = q0 - 1; // too small by 1. if ((u - q0 * v) >= v) q0 = q0 + 1; // Now q0 is correct. return (q0); } } EXPORT_SYMBOL(__udivdi3); #ifndef abs64 /* CSTYLED */ #define abs64(x) ({ uint64_t t = (x) >> 63; ((x) ^ t) - t; }) #endif /* * Implementation of 64-bit signed division for 32-bit machines. */ int64_t __divdi3(int64_t u, int64_t v) { int64_t q, t; q = __udivdi3(abs64(u), abs64(v)); t = (u ^ v) >> 63; // If u, v have different return ((q ^ t) - t); // signs, negate q. } EXPORT_SYMBOL(__divdi3); /* * Implementation of 64-bit unsigned modulo for 32-bit machines. */ uint64_t __umoddi3(uint64_t dividend, uint64_t divisor) { return (dividend - (divisor * __udivdi3(dividend, divisor))); } EXPORT_SYMBOL(__umoddi3); /* 64-bit signed modulo for 32-bit machines. */ int64_t __moddi3(int64_t n, int64_t d) { int64_t q; boolean_t nn = B_FALSE; if (n < 0) { nn = B_TRUE; n = -n; } if (d < 0) d = -d; q = __umoddi3(n, d); return (nn ? -q : q); } EXPORT_SYMBOL(__moddi3); /* * Implementation of 64-bit unsigned division/modulo for 32-bit machines. */ uint64_t __udivmoddi4(uint64_t n, uint64_t d, uint64_t *r) { uint64_t q = __udivdi3(n, d); if (r) *r = n - d * q; return (q); } EXPORT_SYMBOL(__udivmoddi4); /* * Implementation of 64-bit signed division/modulo for 32-bit machines. */ int64_t __divmoddi4(int64_t n, int64_t d, int64_t *r) { int64_t q, rr; boolean_t nn = B_FALSE; boolean_t nd = B_FALSE; if (n < 0) { nn = B_TRUE; n = -n; } if (d < 0) { nd = B_TRUE; d = -d; } q = __udivmoddi4(n, d, (uint64_t *)&rr); if (nn != nd) q = -q; if (nn) rr = -rr; if (r) *r = rr; return (q); } EXPORT_SYMBOL(__divmoddi4); #if defined(__arm) || defined(__arm__) /* * Implementation of 64-bit (un)signed division for 32-bit arm machines. * * Run-time ABI for the ARM Architecture (page 20). A pair of (unsigned) * long longs is returned in {{r0, r1}, {r2,r3}}, the quotient in {r0, r1}, * and the remainder in {r2, r3}. The return type is specifically left * set to 'void' to ensure the compiler does not overwrite these registers * during the return. 
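*
* (Sketch, not part of this patch: whatever pair is handed back in
* {r0,r1}/{r2,r3} must satisfy the usual division identity, which a
* userspace self-test could assert as follows; check_divmod() is a
* hypothetical helper standing in for __udivdi3()/__umoddi3():)
*
*     #include <assert.h>
*     #include <stdint.h>
*
*     static void check_divmod(uint64_t u, uint64_t v)
*     {
*         assert(v != 0);
*         uint64_t q = u / v;        // stands in for __udivdi3(u, v)
*         uint64_t r = u % v;        // stands in for __umoddi3(u, v)
*         assert(u == q * v + r);    // quotient/remainder contract
*     }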
All results are in registers as per ABI */ void __aeabi_uldivmod(uint64_t u, uint64_t v) { uint64_t res; uint64_t mod; res = __udivdi3(u, v); mod = __umoddi3(u, v); { register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF); register uint32_t r1 asm("r1") = (res >> 32); register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF); register uint32_t r3 asm("r3") = (mod >> 32); asm volatile("" : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */ : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */ return; /* r0; */ } } EXPORT_SYMBOL(__aeabi_uldivmod); void __aeabi_ldivmod(int64_t u, int64_t v) { int64_t res; uint64_t mod; res = __divdi3(u, v); mod = __umoddi3(u, v); { register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF); register uint32_t r1 asm("r1") = (res >> 32); register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF); register uint32_t r3 asm("r3") = (mod >> 32); asm volatile("" : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */ : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */ return; /* r0; */ } } EXPORT_SYMBOL(__aeabi_ldivmod); #endif /* __arm || __arm__ */ #pragma GCC diagnostic pop #endif /* BITS_PER_LONG */ /* * NOTE: The strtoxx behavior is solely based on my reading of the Solaris * ddi_strtol(9F) man page. I have not verified the behavior of these * functions against their Solaris counterparts. It is possible that I * may have misinterpreted the man page or the man page is incorrect. */ int ddi_strtol(const char *, char **, int, long *); int ddi_strtoull(const char *, char **, int, unsigned long long *); int ddi_strtoll(const char *, char **, int, long long *); #define define_ddi_strtox(type, valtype) \ int ddi_strto##type(const char *str, char **endptr, \ int base, valtype *result) \ { \ valtype last_value, value = 0; \ char *ptr = (char *)str; \ int digit, minus = 0; \ \ while (strchr(" \t\n\r\f", *ptr)) \ ++ptr; \ \ if (strlen(ptr) == 0) \ return (EINVAL); \ \ switch (*ptr) { \ case '-': \ minus = 1; \ zfs_fallthrough; \ case '+': \ ++ptr; \ break; \ } \ \ /* Auto-detect base based on prefix */ \ if (!base) { \ if (str[0] == '0') { \ if (tolower(str[1]) == 'x' && isxdigit(str[2])) { \ base = 16; /* hex */ \ ptr += 2; \ } else if (str[1] >= '0' && str[1] < 8) { \ base = 8; /* octal */ \ ptr += 1; \ } else { \ return (EINVAL); \ } \ } else { \ base = 10; /* decimal */ \ } \ } \ \ while (1) { \ if (isdigit(*ptr)) \ digit = *ptr - '0'; \ else if (isalpha(*ptr)) \ digit = tolower(*ptr) - 'a' + 10; \ else \ break; \ \ if (digit >= base) \ break; \ \ last_value = value; \ value = value * base + digit; \ if (last_value > value) /* Overflow */ \ return (ERANGE); \ \ ptr++; \ } \ \ *result = minus ? 
-value : value; \ \ if (endptr) \ *endptr = ptr; \ \ return (0); \ } \ define_ddi_strtox(l, long) define_ddi_strtox(ull, unsigned long long) define_ddi_strtox(ll, long long) EXPORT_SYMBOL(ddi_strtol); EXPORT_SYMBOL(ddi_strtoll); EXPORT_SYMBOL(ddi_strtoull); int ddi_copyin(const void *from, void *to, size_t len, int flags) { /* Fake ioctl() issued by kernel, 'from' is a kernel address */ if (flags & FKIOCTL) { memcpy(to, from, len); return (0); } return (copyin(from, to, len)); } EXPORT_SYMBOL(ddi_copyin); int ddi_copyout(const void *from, void *to, size_t len, int flags) { /* Fake ioctl() issued by kernel, 'from' is a kernel address */ if (flags & FKIOCTL) { memcpy(to, from, len); return (0); } return (copyout(from, to, len)); } EXPORT_SYMBOL(ddi_copyout); static ssize_t spl_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { #if defined(HAVE_KERNEL_READ_PPOS) return (kernel_read(file, buf, count, pos)); #else mm_segment_t saved_fs; ssize_t ret; saved_fs = get_fs(); set_fs(KERNEL_DS); ret = vfs_read(file, (void __user *)buf, count, pos); set_fs(saved_fs); return (ret); #endif } static int spl_getattr(struct file *filp, struct kstat *stat) { int rc; ASSERT(filp); ASSERT(stat); #if defined(HAVE_4ARGS_VFS_GETATTR) rc = vfs_getattr(&filp->f_path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); #elif defined(HAVE_2ARGS_VFS_GETATTR) rc = vfs_getattr(&filp->f_path, stat); #elif defined(HAVE_3ARGS_VFS_GETATTR) rc = vfs_getattr(filp->f_path.mnt, filp->f_dentry, stat); #else #error "No available vfs_getattr()" #endif if (rc) return (-rc); return (0); } /* * Read the unique system identifier from the /etc/hostid file. * * The behavior of /usr/bin/hostid on Linux systems with the * regular eglibc and coreutils is: * * 1. Generate the value if the /etc/hostid file does not exist * or if the /etc/hostid file is less than four bytes in size. * * 2. If the /etc/hostid file is at least 4 bytes, then return * the first four bytes [0..3] in native endian order. * * 3. Always ignore bytes [4..] if they exist in the file. * * Only the first four bytes are significant, even on systems that * have a 64-bit word size. * * See: * * eglibc: sysdeps/unix/sysv/linux/gethostid.c * coreutils: src/hostid.c * * Notes: * * The /etc/hostid file on Solaris is a text file that often reads: * * # DO NOT EDIT * "0123456789" * * Directly copying this file to Linux results in a constant * hostid of 4f442023 because the default comment constitutes * the first four bytes of the file. * */ static char *spl_hostid_path = HW_HOSTID_PATH; module_param(spl_hostid_path, charp, 0444); MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)"); static int hostid_read(uint32_t *hostid) { uint64_t size; uint32_t value = 0; int error; loff_t off; struct file *filp; struct kstat stat; filp = filp_open(spl_hostid_path, 0, 0); if (IS_ERR(filp)) return (ENOENT); error = spl_getattr(filp, &stat); if (error) { filp_close(filp, 0); return (error); } size = stat.size; // cppcheck-suppress sizeofwithnumericparameter if (size < sizeof (HW_HOSTID_MASK)) { filp_close(filp, 0); return (EINVAL); } off = 0; /* * Read directly into the variable like eglibc does. * Short reads are okay; native behavior is preserved. */ error = spl_kernel_read(filp, &value, sizeof (value), &off); if (error < 0) { filp_close(filp, 0); return (EIO); } /* Mask down to 32 bits like coreutils does. */ *hostid = (value & HW_HOSTID_MASK); filp_close(filp, 0); return (0); } /* * Return the system hostid. 
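*
* (For reference, a minimal userspace sketch of the four-byte,
* native-endian read performed by hostid_read() above; not part of
* this patch, and read_hostid_file() plus its path argument are
* illustrative only:)
*
*     #include <stdint.h>
*     #include <stdio.h>
*
*     static int read_hostid_file(const char *path, uint32_t *hostid)
*     {
*         FILE *fp = fopen(path, "rb");
*         uint32_t v = 0;
*
*         if (fp == NULL)
*             return (-1);
*         // Only bytes [0..3] are significant, in native endian order.
*         if (fread(&v, sizeof (v), 1, fp) != 1) {
*             fclose(fp);
*             return (-1);
*         }
*         fclose(fp);
*         *hostid = v;    // the kernel code additionally applies HW_HOSTID_MASK
*         return (0);
*     }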
Preferentially use the spl_hostid module option * when set, otherwise use the value in the /etc/hostid file. */ uint32_t zone_get_hostid(void *zone) { uint32_t hostid; ASSERT3P(zone, ==, NULL); if (spl_hostid != 0) return ((uint32_t)(spl_hostid & HW_HOSTID_MASK)); if (hostid_read(&hostid) == 0) return (hostid); return (0); } EXPORT_SYMBOL(zone_get_hostid); static int spl_kvmem_init(void) { int rc = 0; rc = spl_kmem_init(); if (rc) return (rc); rc = spl_vmem_init(); if (rc) { spl_kmem_fini(); return (rc); } return (rc); } /* * We initialize the random number generator with 128 bits of entropy from the * system random number generator. In the improbable case that we have a zero * seed, we fallback to the system jiffies, unless it is also zero, in which * situation we use a preprogrammed seed. We step forward by 2^64 iterations to * initialize each of the per-cpu seeds so that the sequences generated on each * CPU are guaranteed to never overlap in practice. */ -static void __init +static int __init spl_random_init(void) { uint64_t s[2]; int i = 0; spl_pseudo_entropy = __alloc_percpu(2 * sizeof (uint64_t), sizeof (uint64_t)); + if (!spl_pseudo_entropy) + return (-ENOMEM); + get_random_bytes(s, sizeof (s)); if (s[0] == 0 && s[1] == 0) { if (jiffies != 0) { s[0] = jiffies; s[1] = ~0 - jiffies; } else { (void) memcpy(s, "improbable seed", sizeof (s)); } printk("SPL: get_random_bytes() returned 0 " "when generating random seed. Setting initial seed to " "0x%016llx%016llx.\n", cpu_to_be64(s[0]), cpu_to_be64(s[1])); } for_each_possible_cpu(i) { uint64_t *wordp = per_cpu_ptr(spl_pseudo_entropy, i); spl_rand_jump(s); wordp[0] = s[0]; wordp[1] = s[1]; } + + return (0); } static void spl_random_fini(void) { free_percpu(spl_pseudo_entropy); } static void spl_kvmem_fini(void) { spl_vmem_fini(); spl_kmem_fini(); } static int __init spl_init(void) { int rc = 0; - spl_random_init(); + if ((rc = spl_random_init())) + goto out0; if ((rc = spl_kvmem_init())) goto out1; if ((rc = spl_tsd_init())) goto out2; if ((rc = spl_taskq_init())) goto out3; if ((rc = spl_kmem_cache_init())) goto out4; if ((rc = spl_proc_init())) goto out5; if ((rc = spl_kstat_init())) goto out6; if ((rc = spl_zlib_init())) goto out7; if ((rc = spl_zone_init())) goto out8; return (rc); out8: spl_zlib_fini(); out7: spl_kstat_fini(); out6: spl_proc_fini(); out5: spl_kmem_cache_fini(); out4: spl_taskq_fini(); out3: spl_tsd_fini(); out2: spl_kvmem_fini(); out1: + spl_random_fini(); +out0: return (rc); } static void __exit spl_fini(void) { spl_zone_fini(); spl_zlib_fini(); spl_kstat_fini(); spl_proc_fini(); spl_kmem_cache_fini(); spl_taskq_fini(); spl_tsd_fini(); spl_kvmem_fini(); spl_random_fini(); } module_init(spl_init); module_exit(spl_fini); MODULE_DESCRIPTION("Solaris Porting Layer"); MODULE_AUTHOR(ZFS_META_AUTHOR); MODULE_LICENSE("GPL"); MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE); diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c index ba4ca49a2ac9..efb8d0c30330 100644 --- a/module/os/linux/spl/spl-kmem-cache.c +++ b/module/os/linux/spl/spl-kmem-cache.c @@ -1,1462 +1,1465 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. 
* * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . */ #include #include #include #include #include #include #include #include #include #include /* * Within the scope of spl-kmem.c file the kmem_cache_* definitions * are removed to allow access to the real Linux slab allocator. */ #undef kmem_cache_destroy #undef kmem_cache_create #undef kmem_cache_alloc #undef kmem_cache_free /* * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() * with smp_mb__{before,after}_atomic() because they were redundant. This is * only used inside our SLAB allocator, so we implement an internal wrapper * here to give us smp_mb__{before,after}_atomic() on older kernels. */ #ifndef smp_mb__before_atomic #define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x) #endif #ifndef smp_mb__after_atomic #define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x) #endif /* BEGIN CSTYLED */ /* * Cache magazines are an optimization designed to minimize the cost of * allocating memory. They do this by keeping a per-cpu cache of recently * freed objects, which can then be reallocated without taking a lock. This * can improve performance on highly contended caches. However, because * objects in magazines will prevent otherwise empty slabs from being * immediately released this may not be ideal for low memory machines. * * For this reason spl_kmem_cache_magazine_size can be used to set a maximum * magazine size. When this value is set to 0 the magazine size will be * automatically determined based on the object size. Otherwise magazines * will be limited to 2-256 objects per magazine (i.e per cpu). Magazines * may never be entirely disabled in this implementation. */ static unsigned int spl_kmem_cache_magazine_size = 0; module_param(spl_kmem_cache_magazine_size, uint, 0444); MODULE_PARM_DESC(spl_kmem_cache_magazine_size, "Default magazine size (2-256), set automatically (0)"); /* * The default behavior is to report the number of objects remaining in the * cache. This allows the Linux VM to repeatedly reclaim objects from the * cache when memory is low satisfy other memory allocations. Alternately, * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache * is reclaimed. This may increase the likelihood of out of memory events. */ static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */; module_param(spl_kmem_cache_reclaim, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)"); static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB; module_param(spl_kmem_cache_obj_per_slab, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab"); static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE; module_param(spl_kmem_cache_max_size, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB"); /* * For small objects the Linux slab allocator should be used to make the most * efficient use of the memory. 
However, large objects are not supported by * the Linux slab and therefore the SPL implementation is preferred. A cutoff * of 16K was determined to be optimal for architectures using 4K pages and * to also work well on architectures using larger 64K page sizes. */ static unsigned int spl_kmem_cache_slab_limit = 16384; module_param(spl_kmem_cache_slab_limit, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_slab_limit, "Objects less than N bytes use the Linux slab"); /* * The number of threads available to allocate new slabs for caches. This * should not need to be tuned but it is available for performance analysis. */ static unsigned int spl_kmem_cache_kmem_threads = 4; module_param(spl_kmem_cache_kmem_threads, uint, 0444); MODULE_PARM_DESC(spl_kmem_cache_kmem_threads, "Number of spl_kmem_cache threads"); /* END CSTYLED */ /* * Slab allocation interfaces * * While the Linux slab implementation was inspired by the Solaris * implementation I cannot use it to emulate the Solaris APIs. I * require two features which are not provided by the Linux slab. * * 1) Constructors AND destructors. Recent versions of the Linux * kernel have removed support for destructors. This is a deal * breaker for the SPL which contains particularly expensive * initializers for mutexes, condition variables, etc. We also * require a minimal level of cleanup for these data types, unlike * many Linux data types which do not need to be explicitly destroyed. * * 2) Virtual address space backed slab. Callers of the Solaris slab * expect it to work well for both small and very large allocations. * Because of memory fragmentation the Linux slab, which is backed * by kmalloc'ed memory, performs very badly when confronted with * large numbers of large allocations. Basing the slab on the * virtual address space removes the need for contiguous pages * and greatly improves performance for large allocations. * * For these reasons, the SPL has its own slab implementation with * the needed features. It is not as highly optimized as either the * Solaris or Linux slabs, but it should get me most of what is * needed until it can be optimized or obsoleted by another approach. * * One serious concern I do have about this method is the relatively * small virtual address space on 32bit arches. This will seriously * constrain the size of the slab caches and their performance. */ struct list_head spl_kmem_cache_list; /* List of caches */ struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */ static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj); static void * kv_alloc(spl_kmem_cache_t *skc, int size, int flags) { gfp_t lflags = kmem_flags_convert(flags); void *ptr; ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM); /* Resulting allocated memory will be page aligned */ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); return (ptr); } static void kv_free(spl_kmem_cache_t *skc, void *ptr, int size) { ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); /* * The Linux direct reclaim path uses this out of band value to * determine if forward progress is being made. Normally this is * incremented by kmem_freepages() which is part of the various * Linux slab implementations. However, since we are using none * of that infrastructure we are responsible for incrementing it. */ if (current->reclaim_state) current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT; vfree(ptr); } /* * Required space for each aligned sks.
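*
* (Illustration with hypothetical numbers: P2ROUNDUP(x, align) rounds x
* up to the next multiple of the power-of-two alignment, and the same
* rounding also sets the per-object footprint. For a 516-byte object
* with 8-byte alignment and, say, a 32-byte spl_kmem_obj_t, the value
* computed by spl_obj_size() below would be
* P2ROUNDUP(516, 8) + P2ROUNDUP(32, 8) = 520 + 32 = 552 bytes.)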
*/ static inline uint32_t spl_sks_size(spl_kmem_cache_t *skc) { return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t), skc->skc_obj_align, uint32_t)); } /* * Required space for each aligned object. */ static inline uint32_t spl_obj_size(spl_kmem_cache_t *skc) { uint32_t align = skc->skc_obj_align; return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t)); } uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache) { return (cache->skc_obj_total); } EXPORT_SYMBOL(spl_kmem_cache_inuse); uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache) { return (cache->skc_obj_size); } EXPORT_SYMBOL(spl_kmem_cache_entry_size); /* * Lookup the spl_kmem_object_t for an object given that object. */ static inline spl_kmem_obj_t * spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) { return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size, skc->skc_obj_align, uint32_t)); } /* * It's important that we pack the spl_kmem_obj_t structure and the * actual objects in to one large address space to minimize the number * of calls to the allocator. It is far better to do a few large * allocations and then subdivide it ourselves. Now which allocator * we use requires balancing a few trade offs. * * For small objects we use kmem_alloc() because as long as you are * only requesting a small number of pages (ideally just one) its cheap. * However, when you start requesting multiple pages with kmem_alloc() * it gets increasingly expensive since it requires contiguous pages. * For this reason we shift to vmem_alloc() for slabs of large objects * which removes the need for contiguous pages. We do not use * vmem_alloc() in all cases because there is significant locking * overhead in __get_vm_area_node(). This function takes a single * global lock when acquiring an available virtual address range which * serializes all vmem_alloc()'s for all slab caches. Using slightly * different allocation functions for small and large objects should * give us the best of both worlds. * * +------------------------+ * | spl_kmem_slab_t --+-+ | * | skc_obj_size <-+ | | * | spl_kmem_obj_t | | * | skc_obj_size <---+ | * | spl_kmem_obj_t | | * | ... v | * +------------------------+ */ static spl_kmem_slab_t * spl_slab_alloc(spl_kmem_cache_t *skc, int flags) { spl_kmem_slab_t *sks; void *base; uint32_t obj_size; base = kv_alloc(skc, skc->skc_slab_size, flags); if (base == NULL) return (NULL); sks = (spl_kmem_slab_t *)base; sks->sks_magic = SKS_MAGIC; sks->sks_objs = skc->skc_slab_objs; sks->sks_age = jiffies; sks->sks_cache = skc; INIT_LIST_HEAD(&sks->sks_list); INIT_LIST_HEAD(&sks->sks_free_list); sks->sks_ref = 0; obj_size = spl_obj_size(skc); for (int i = 0; i < sks->sks_objs; i++) { void *obj = base + spl_sks_size(skc) + (i * obj_size); ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj); sko->sko_addr = obj; sko->sko_magic = SKO_MAGIC; sko->sko_slab = sks; INIT_LIST_HEAD(&sko->sko_list); list_add_tail(&sko->sko_list, &sks->sks_free_list); } return (sks); } /* * Remove a slab from complete or partial list, it must be called with * the 'skc->skc_lock' held but the actual free must be performed * outside the lock to prevent deadlocking on vmem addresses. 
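*
* The general shape of that pattern -- collect victims onto a private
* list while holding the lock, then free them after dropping it -- can
* be sketched in userspace as follows (reap_demo(), node_t and the
* pthread lock are illustrative stand-ins, not part of this patch):
*
*     #include <pthread.h>
*     #include <stdlib.h>
*
*     typedef struct node { struct node *next; } node_t;
*
*     static void reap_demo(pthread_mutex_t *lock, node_t **list)
*     {
*         node_t *victims, *n;
*
*         pthread_mutex_lock(lock);
*         victims = *list;        // detach the whole list under the lock
*         *list = NULL;
*         pthread_mutex_unlock(lock);
*
*         while ((n = victims) != NULL) {    // free outside the lock
*             victims = n->next;
*             free(n);
*         }
*     }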
*/ static void spl_slab_free(spl_kmem_slab_t *sks, struct list_head *sks_list, struct list_head *sko_list) { spl_kmem_cache_t *skc; ASSERT(sks->sks_magic == SKS_MAGIC); ASSERT(sks->sks_ref == 0); skc = sks->sks_cache; ASSERT(skc->skc_magic == SKC_MAGIC); /* * Update slab/objects counters in the cache, then remove the * slab from the skc->skc_partial_list. Finally add the slab * and all its objects in to the private work lists where the * destructors will be called and the memory freed to the system. */ skc->skc_obj_total -= sks->sks_objs; skc->skc_slab_total--; list_del(&sks->sks_list); list_add(&sks->sks_list, sks_list); list_splice_init(&sks->sks_free_list, sko_list); } /* * Reclaim empty slabs at the end of the partial list. */ static void spl_slab_reclaim(spl_kmem_cache_t *skc) { spl_kmem_slab_t *sks = NULL, *m = NULL; spl_kmem_obj_t *sko = NULL, *n = NULL; LIST_HEAD(sks_list); LIST_HEAD(sko_list); /* * Empty slabs and objects must be moved to a private list so they * can be safely freed outside the spin lock. All empty slabs are * at the end of skc->skc_partial_list, therefore once a non-empty * slab is found we can stop scanning. */ spin_lock(&skc->skc_lock); list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list, sks_list) { if (sks->sks_ref > 0) break; spl_slab_free(sks, &sks_list, &sko_list); } spin_unlock(&skc->skc_lock); /* * The following two loops ensure all the object destructors are run, * and the slabs themselves are freed. This is all done outside the * skc->skc_lock since this allows the destructor to sleep, and * allows us to perform a conditional reschedule when a freeing a * large number of objects and slabs back to the system. */ list_for_each_entry_safe(sko, n, &sko_list, sko_list) { ASSERT(sko->sko_magic == SKO_MAGIC); } list_for_each_entry_safe(sks, m, &sks_list, sks_list) { ASSERT(sks->sks_magic == SKS_MAGIC); kv_free(skc, sks, skc->skc_slab_size); } } static spl_kmem_emergency_t * spl_emergency_search(struct rb_root *root, void *obj) { struct rb_node *node = root->rb_node; spl_kmem_emergency_t *ske; unsigned long address = (unsigned long)obj; while (node) { ske = container_of(node, spl_kmem_emergency_t, ske_node); if (address < ske->ske_obj) node = node->rb_left; else if (address > ske->ske_obj) node = node->rb_right; else return (ske); } return (NULL); } static int spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske) { struct rb_node **new = &(root->rb_node), *parent = NULL; spl_kmem_emergency_t *ske_tmp; unsigned long address = ske->ske_obj; while (*new) { ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node); parent = *new; if (address < ske_tmp->ske_obj) new = &((*new)->rb_left); else if (address > ske_tmp->ske_obj) new = &((*new)->rb_right); else return (0); } rb_link_node(&ske->ske_node, parent, new); rb_insert_color(&ske->ske_node, root); return (1); } /* * Allocate a single emergency object and track it in a red black tree. 
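*
* (Conceptually, the tree exists so that the free path can answer "was
* this address an emergency allocation?". A simplified userspace model
* of that address-keyed lookup, using a sorted array and bsearch() in
* place of the kernel rb-tree; is_tracked() and addr_cmp() are
* illustrative names, not part of this patch:)
*
*     #include <stdint.h>
*     #include <stdlib.h>
*
*     static int addr_cmp(const void *a, const void *b)
*     {
*         uintptr_t x = *(const uintptr_t *)a;
*         uintptr_t y = *(const uintptr_t *)b;
*         return (x < y ? -1 : (x > y ? 1 : 0));
*     }
*
*     // Nonzero if 'obj' is one of the 'n' tracked addresses.
*     static int is_tracked(const uintptr_t *sorted, size_t n, void *obj)
*     {
*         uintptr_t key = (uintptr_t)obj;
*         return (bsearch(&key, sorted, n, sizeof (key), addr_cmp) != NULL);
*     }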
*/ static int spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj) { gfp_t lflags = kmem_flags_convert(flags); spl_kmem_emergency_t *ske; int order = get_order(skc->skc_obj_size); int empty; /* Last chance use a partial slab if one now exists */ spin_lock(&skc->skc_lock); empty = list_empty(&skc->skc_partial_list); spin_unlock(&skc->skc_lock); if (!empty) return (-EEXIST); ske = kmalloc(sizeof (*ske), lflags); if (ske == NULL) return (-ENOMEM); ske->ske_obj = __get_free_pages(lflags, order); if (ske->ske_obj == 0) { kfree(ske); return (-ENOMEM); } spin_lock(&skc->skc_lock); empty = spl_emergency_insert(&skc->skc_emergency_tree, ske); if (likely(empty)) { skc->skc_obj_total++; skc->skc_obj_emergency++; if (skc->skc_obj_emergency > skc->skc_obj_emergency_max) skc->skc_obj_emergency_max = skc->skc_obj_emergency; } spin_unlock(&skc->skc_lock); if (unlikely(!empty)) { free_pages(ske->ske_obj, order); kfree(ske); return (-EINVAL); } *obj = (void *)ske->ske_obj; return (0); } /* * Locate the passed object in the red black tree and free it. */ static int spl_emergency_free(spl_kmem_cache_t *skc, void *obj) { spl_kmem_emergency_t *ske; int order = get_order(skc->skc_obj_size); spin_lock(&skc->skc_lock); ske = spl_emergency_search(&skc->skc_emergency_tree, obj); if (ske) { rb_erase(&ske->ske_node, &skc->skc_emergency_tree); skc->skc_obj_emergency--; skc->skc_obj_total--; } spin_unlock(&skc->skc_lock); if (ske == NULL) return (-ENOENT); free_pages(ske->ske_obj, order); kfree(ske); return (0); } /* * Release objects from the per-cpu magazine back to their slab. The flush * argument contains the max number of entries to remove from the magazine. */ static void spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) { spin_lock(&skc->skc_lock); ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(skm->skm_magic == SKM_MAGIC); int count = MIN(flush, skm->skm_avail); for (int i = 0; i < count; i++) spl_cache_shrink(skc, skm->skm_objs[i]); skm->skm_avail -= count; memmove(skm->skm_objs, &(skm->skm_objs[count]), sizeof (void *) * skm->skm_avail); spin_unlock(&skc->skc_lock); } /* * Size a slab based on the size of each aligned object plus spl_kmem_obj_t. * When on-slab we want to target spl_kmem_cache_obj_per_slab. However, * for very small objects we may end up with more than this so as not * to waste space in the minimal allocation of a single page. */ static int spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) { uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs; sks_size = spl_sks_size(skc); obj_size = spl_obj_size(skc); max_size = (spl_kmem_cache_max_size * 1024 * 1024); tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size); if (tgt_size <= max_size) { tgt_objs = (tgt_size - sks_size) / obj_size; } else { tgt_objs = (max_size - sks_size) / obj_size; tgt_size = (tgt_objs * obj_size) + sks_size; } if (tgt_objs == 0) return (-ENOSPC); *objs = tgt_objs; *size = tgt_size; return (0); } /* * Make a guess at reasonable per-cpu magazine size based on the size of * each object and the cost of caching N of them in each magazine. Long * term this should really adapt based on an observed usage heuristic. 
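*
* (Worked example of the tiers below, assuming 4 KiB pages: a 16 KiB
* object is larger than one page but no larger than 32 pages, so it
* gets a 64-entry magazine, i.e. up to 64 * 16 KiB = 1 MiB of objects
* parked per CPU; the "minimum" figures in the comments describe an
* object just over each threshold.)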
*/ static int spl_magazine_size(spl_kmem_cache_t *skc) { uint32_t obj_size = spl_obj_size(skc); int size; if (spl_kmem_cache_magazine_size > 0) return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2)); /* Per-magazine sizes below assume a 4 KiB page size */ if (obj_size > (PAGE_SIZE * 256)) size = 4; /* Minimum 4 MiB per-magazine */ else if (obj_size > (PAGE_SIZE * 32)) size = 16; /* Minimum 2 MiB per-magazine */ else if (obj_size > (PAGE_SIZE)) size = 64; /* Minimum 256 KiB per-magazine */ else if (obj_size > (PAGE_SIZE / 4)) size = 128; /* Minimum 128 KiB per-magazine */ else size = 256; return (size); } /* * Allocate a per-cpu magazine to associate with a specific core. */ static spl_kmem_magazine_t * spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) { spl_kmem_magazine_t *skm; int size = sizeof (spl_kmem_magazine_t) + sizeof (void *) * skc->skc_mag_size; skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); if (skm) { skm->skm_magic = SKM_MAGIC; skm->skm_avail = 0; skm->skm_size = skc->skc_mag_size; skm->skm_refill = skc->skc_mag_refill; skm->skm_cache = skc; skm->skm_cpu = cpu; } return (skm); } /* * Free a per-cpu magazine associated with a specific core. */ static void spl_magazine_free(spl_kmem_magazine_t *skm) { ASSERT(skm->skm_magic == SKM_MAGIC); ASSERT(skm->skm_avail == 0); kfree(skm); } /* * Create all per-cpu magazines of reasonable sizes. */ static int spl_magazine_create(spl_kmem_cache_t *skc) { int i = 0; ASSERT((skc->skc_flags & KMC_SLAB) == 0); skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) * num_possible_cpus(), kmem_flags_convert(KM_SLEEP)); skc->skc_mag_size = spl_magazine_size(skc); skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; for_each_possible_cpu(i) { skc->skc_mag[i] = spl_magazine_alloc(skc, i); if (!skc->skc_mag[i]) { for (i--; i >= 0; i--) spl_magazine_free(skc->skc_mag[i]); kfree(skc->skc_mag); return (-ENOMEM); } } return (0); } /* * Destroy all per-cpu magazines.
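*
* (spl_magazine_create() above follows the usual "unwind whatever was
* already allocated on failure" pattern; stripped of the kernel
* details it looks like this -- alloc_all() is an illustrative name,
* not part of this patch:)
*
*     #include <stdlib.h>
*
*     static int alloc_all(void **slots, int n, size_t sz)
*     {
*         for (int i = 0; i < n; i++) {
*             if ((slots[i] = malloc(sz)) == NULL) {
*                 while (--i >= 0)        // unwind the partial work
*                     free(slots[i]);
*                 return (-1);
*             }
*         }
*         return (0);
*     }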
*/ static void spl_magazine_destroy(spl_kmem_cache_t *skc) { spl_kmem_magazine_t *skm; int i = 0; ASSERT((skc->skc_flags & KMC_SLAB) == 0); for_each_possible_cpu(i) { skm = skc->skc_mag[i]; spl_cache_flush(skc, skm, skm->skm_avail); spl_magazine_free(skm); } kfree(skc->skc_mag); } /* * Create a object cache based on the following arguments: * name cache name * size cache object size * align cache object alignment * ctor cache object constructor * dtor cache object destructor * reclaim cache object reclaim * priv cache private data for ctor/dtor/reclaim * vmp unused must be NULL * flags * KMC_KVMEM Force kvmem backed SPL cache * KMC_SLAB Force Linux slab backed cache * KMC_NODEBUG Disable debugging (unsupported) */ spl_kmem_cache_t * spl_kmem_cache_create(const char *name, size_t size, size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim, void *priv, void *vmp, int flags) { gfp_t lflags = kmem_flags_convert(KM_SLEEP); spl_kmem_cache_t *skc; int rc; /* * Unsupported flags */ ASSERT(vmp == NULL); ASSERT(reclaim == NULL); might_sleep(); skc = kzalloc(sizeof (*skc), lflags); if (skc == NULL) return (NULL); skc->skc_magic = SKC_MAGIC; skc->skc_name_size = strlen(name) + 1; skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags); if (skc->skc_name == NULL) { kfree(skc); return (NULL); } strncpy(skc->skc_name, name, skc->skc_name_size); skc->skc_ctor = ctor; skc->skc_dtor = dtor; skc->skc_private = priv; skc->skc_vmp = vmp; skc->skc_linux_cache = NULL; skc->skc_flags = flags; skc->skc_obj_size = size; skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; atomic_set(&skc->skc_ref, 0); INIT_LIST_HEAD(&skc->skc_list); INIT_LIST_HEAD(&skc->skc_complete_list); INIT_LIST_HEAD(&skc->skc_partial_list); skc->skc_emergency_tree = RB_ROOT; spin_lock_init(&skc->skc_lock); init_waitqueue_head(&skc->skc_waitq); skc->skc_slab_fail = 0; skc->skc_slab_create = 0; skc->skc_slab_destroy = 0; skc->skc_slab_total = 0; skc->skc_slab_alloc = 0; skc->skc_slab_max = 0; skc->skc_obj_total = 0; skc->skc_obj_alloc = 0; skc->skc_obj_max = 0; skc->skc_obj_deadlock = 0; skc->skc_obj_emergency = 0; skc->skc_obj_emergency_max = 0; rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0, GFP_KERNEL); if (rc != 0) { kfree(skc); return (NULL); } /* * Verify the requested alignment restriction is sane. */ if (align) { VERIFY(ISP2(align)); VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); VERIFY3U(align, <=, PAGE_SIZE); skc->skc_obj_align = align; } /* * When no specific type of slab is requested (kmem, vmem, or * linuxslab) then select a cache type based on the object size * and default tunables. */ if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) { if (spl_kmem_cache_slab_limit && size <= (size_t)spl_kmem_cache_slab_limit) { /* * Objects smaller than spl_kmem_cache_slab_limit can * use the Linux slab for better space-efficiency. */ skc->skc_flags |= KMC_SLAB; } else { /* * All other objects are considered large and are * placed on kvmem backed slabs. */ skc->skc_flags |= KMC_KVMEM; } } /* * Given the type of slab allocate the required resources. */ if (skc->skc_flags & KMC_KVMEM) { rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size); if (rc) goto out; rc = spl_magazine_create(skc); if (rc) goto out; } else { unsigned long slabflags = 0; if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) { rc = EINVAL; goto out; } #if defined(SLAB_USERCOPY) /* * Required for PAX-enabled kernels if the slab is to be * used for copying between user and kernel space. 
*/ slabflags |= SLAB_USERCOPY; #endif #if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY) /* * Newer grsec patchset uses kmem_cache_create_usercopy() * instead of SLAB_USERCOPY flag */ skc->skc_linux_cache = kmem_cache_create_usercopy( skc->skc_name, size, align, slabflags, 0, size, NULL); #else skc->skc_linux_cache = kmem_cache_create( skc->skc_name, size, align, slabflags, NULL); #endif if (skc->skc_linux_cache == NULL) { rc = ENOMEM; goto out; } } down_write(&spl_kmem_cache_sem); list_add_tail(&skc->skc_list, &spl_kmem_cache_list); up_write(&spl_kmem_cache_sem); return (skc); out: kfree(skc->skc_name); percpu_counter_destroy(&skc->skc_linux_alloc); kfree(skc); return (NULL); } EXPORT_SYMBOL(spl_kmem_cache_create); /* * Register a move callback for cache defragmentation. * XXX: Unimplemented but harmless to stub out for now. */ void spl_kmem_cache_set_move(spl_kmem_cache_t *skc, kmem_cbrc_t (move)(void *, void *, size_t, void *)) { ASSERT(move != NULL); } EXPORT_SYMBOL(spl_kmem_cache_set_move); /* * Destroy a cache and all objects associated with the cache. */ void spl_kmem_cache_destroy(spl_kmem_cache_t *skc) { DECLARE_WAIT_QUEUE_HEAD(wq); taskqid_t id; ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB)); down_write(&spl_kmem_cache_sem); list_del_init(&skc->skc_list); up_write(&spl_kmem_cache_sem); /* Cancel any and wait for any pending delayed tasks */ VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); spin_lock(&skc->skc_lock); id = skc->skc_taskqid; spin_unlock(&skc->skc_lock); taskq_cancel_id(spl_kmem_cache_taskq, id); /* * Wait until all current callers complete, this is mainly * to catch the case where a low memory situation triggers a * cache reaping action which races with this destroy. */ wait_event(wq, atomic_read(&skc->skc_ref) == 0); if (skc->skc_flags & KMC_KVMEM) { spl_magazine_destroy(skc); spl_slab_reclaim(skc); } else { ASSERT(skc->skc_flags & KMC_SLAB); kmem_cache_destroy(skc->skc_linux_cache); } spin_lock(&skc->skc_lock); /* * Validate there are no objects in use and free all the * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */ ASSERT3U(skc->skc_slab_alloc, ==, 0); ASSERT3U(skc->skc_obj_alloc, ==, 0); ASSERT3U(skc->skc_slab_total, ==, 0); ASSERT3U(skc->skc_obj_total, ==, 0); ASSERT3U(skc->skc_obj_emergency, ==, 0); ASSERT(list_empty(&skc->skc_complete_list)); ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0); percpu_counter_destroy(&skc->skc_linux_alloc); spin_unlock(&skc->skc_lock); kfree(skc->skc_name); kfree(skc); } EXPORT_SYMBOL(spl_kmem_cache_destroy); /* * Allocate an object from a slab attached to the cache. This is used to * repopulate the per-cpu magazine caches in batches when they run low. */ static void * spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) { spl_kmem_obj_t *sko; ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(sks->sks_magic == SKS_MAGIC); sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); ASSERT(sko->sko_magic == SKO_MAGIC); ASSERT(sko->sko_addr != NULL); /* Remove from sks_free_list */ list_del_init(&sko->sko_list); sks->sks_age = jiffies; sks->sks_ref++; skc->skc_obj_alloc++; /* Track max obj usage statistics */ if (skc->skc_obj_alloc > skc->skc_obj_max) skc->skc_obj_max = skc->skc_obj_alloc; /* Track max slab usage statistics */ if (sks->sks_ref == 1) { skc->skc_slab_alloc++; if (skc->skc_slab_alloc > skc->skc_slab_max) skc->skc_slab_max = skc->skc_slab_alloc; } return (sko->sko_addr); } /* * Generic slab allocation function to run by the global work queues. 
* It is responsible for allocating a new slab, linking it in to the list * of partial slabs, and then waking any waiters. */ static int __spl_cache_grow(spl_kmem_cache_t *skc, int flags) { spl_kmem_slab_t *sks; fstrans_cookie_t cookie = spl_fstrans_mark(); sks = spl_slab_alloc(skc, flags); spl_fstrans_unmark(cookie); spin_lock(&skc->skc_lock); if (sks) { skc->skc_slab_total++; skc->skc_obj_total += sks->sks_objs; list_add_tail(&sks->sks_list, &skc->skc_partial_list); smp_mb__before_atomic(); clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); smp_mb__after_atomic(); } spin_unlock(&skc->skc_lock); return (sks == NULL ? -ENOMEM : 0); } static void spl_cache_grow_work(void *data) { spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data; spl_kmem_cache_t *skc = ska->ska_cache; int error = __spl_cache_grow(skc, ska->ska_flags); atomic_dec(&skc->skc_ref); smp_mb__before_atomic(); clear_bit(KMC_BIT_GROWING, &skc->skc_flags); smp_mb__after_atomic(); if (error == 0) wake_up_all(&skc->skc_waitq); kfree(ska); } /* * Returns non-zero when a new slab should be available. */ static int spl_cache_grow_wait(spl_kmem_cache_t *skc) { return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags)); } /* * No available objects on any slabs, create a new slab. Note that this * functionality is disabled for KMC_SLAB caches which are backed by the * Linux slab. */ static int spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) { int remaining, rc = 0; ASSERT0(flags & ~KM_PUBLIC_MASK); ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT((skc->skc_flags & KMC_SLAB) == 0); might_sleep(); *obj = NULL; /* * Before allocating a new slab wait for any reaping to complete and * then return so the local magazine can be rechecked for new objects. */ if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING, TASK_UNINTERRUPTIBLE); return (rc ? rc : -EAGAIN); } /* * Note: It would be nice to reduce the overhead of context switch * and improve NUMA locality, by trying to allocate a new slab in the * current process context with KM_NOSLEEP flag. * * However, this can't be applied to vmem/kvmem due to a bug that * spl_vmalloc() doesn't honor gfp flags in page table allocation. */ /* * This is handled by dispatching a work request to the global work * queue. This allows us to asynchronously allocate a new slab while * retaining the ability to safely fall back to a smaller synchronous * allocations to ensure forward progress is always maintained. */ if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) { spl_kmem_alloc_t *ska; ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags)); if (ska == NULL) { clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags); smp_mb__after_atomic(); wake_up_all(&skc->skc_waitq); return (-ENOMEM); } atomic_inc(&skc->skc_ref); ska->ska_cache = skc; ska->ska_flags = flags; taskq_init_ent(&ska->ska_tqe); taskq_dispatch_ent(spl_kmem_cache_taskq, spl_cache_grow_work, ska, 0, &ska->ska_tqe); } /* * The goal here is to only detect the rare case where a virtual slab * allocation has deadlocked. We must be careful to minimize the use * of emergency objects which are more expensive to track. Therefore, * we set a very long timeout for the asynchronous allocation and if * the timeout is reached the cache is flagged as deadlocked. From * this point only new emergency objects will be allocated until the * asynchronous allocation completes and clears the deadlocked flag. 
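*
* (The "wait briefly for the grower, then fall back" step can be
* modelled in userspace with a timed condition-variable wait;
* wait_for_grow(), the mutex/condvar and the grow_done flag are
* illustrative stand-ins, not part of this patch:)
*
*     #include <pthread.h>
*     #include <time.h>
*
*     // Returns 1 if the grower signalled completion in time, 0 on
*     // timeout (the caller would then fall back to emergency objects).
*     static int wait_for_grow(pthread_mutex_t *mtx, pthread_cond_t *cv,
*         const int *grow_done)
*     {
*         struct timespec ts;
*         clock_gettime(CLOCK_REALTIME, &ts);
*         ts.tv_nsec += 100 * 1000 * 1000;    // roughly HZ / 10
*         if (ts.tv_nsec >= 1000000000L) {
*             ts.tv_sec++;
*             ts.tv_nsec -= 1000000000L;
*         }
*
*         pthread_mutex_lock(mtx);
*         while (!*grow_done) {
*             if (pthread_cond_timedwait(cv, mtx, &ts) != 0)
*                 break;                      // timed out
*         }
*         int done = *grow_done;
*         pthread_mutex_unlock(mtx);
*         return (done);
*     }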
*/ if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) { rc = spl_emergency_alloc(skc, flags, obj); } else { remaining = wait_event_timeout(skc->skc_waitq, spl_cache_grow_wait(skc), HZ / 10); if (!remaining) { spin_lock(&skc->skc_lock); if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) { set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); skc->skc_obj_deadlock++; } spin_unlock(&skc->skc_lock); } rc = -ENOMEM; } return (rc); } /* * Refill a per-cpu magazine with objects from the slabs for this cache. * Ideally the magazine can be repopulated using existing objects which have * been released, however if we are unable to locate enough free objects new * slabs of objects will be created. On success NULL is returned, otherwise * the address of a single emergency object is returned for use by the caller. */ static void * spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) { spl_kmem_slab_t *sks; int count = 0, rc, refill; void *obj = NULL; ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(skm->skm_magic == SKM_MAGIC); refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); spin_lock(&skc->skc_lock); while (refill > 0) { /* No slabs available we may need to grow the cache */ if (list_empty(&skc->skc_partial_list)) { spin_unlock(&skc->skc_lock); local_irq_enable(); rc = spl_cache_grow(skc, flags, &obj); local_irq_disable(); /* Emergency object for immediate use by caller */ if (rc == 0 && obj != NULL) return (obj); if (rc) goto out; /* Rescheduled to different CPU skm is not local */ if (skm != skc->skc_mag[smp_processor_id()]) goto out; /* * Potentially rescheduled to the same CPU but * allocations may have occurred from this CPU while * we were sleeping so recalculate max refill. */ refill = MIN(refill, skm->skm_size - skm->skm_avail); spin_lock(&skc->skc_lock); continue; } /* Grab the next available slab */ sks = list_entry((&skc->skc_partial_list)->next, spl_kmem_slab_t, sks_list); ASSERT(sks->sks_magic == SKS_MAGIC); ASSERT(sks->sks_ref < sks->sks_objs); ASSERT(!list_empty(&sks->sks_free_list)); /* * Consume as many objects as needed to refill the requested * cache. We must also be careful not to overfill it. */ while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) { ASSERT(skm->skm_avail < skm->skm_size); ASSERT(count < skm->skm_size); skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks); } /* Move slab to skc_complete_list when full */ if (sks->sks_ref == sks->sks_objs) { list_del(&sks->sks_list); list_add(&sks->sks_list, &skc->skc_complete_list); } } spin_unlock(&skc->skc_lock); out: return (NULL); } /* * Release an object back to the slab from which it came. */ static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) { spl_kmem_slab_t *sks = NULL; spl_kmem_obj_t *sko = NULL; ASSERT(skc->skc_magic == SKC_MAGIC); sko = spl_sko_from_obj(skc, obj); ASSERT(sko->sko_magic == SKO_MAGIC); sks = sko->sko_slab; ASSERT(sks->sks_magic == SKS_MAGIC); ASSERT(sks->sks_cache == skc); list_add(&sko->sko_list, &sks->sks_free_list); sks->sks_age = jiffies; sks->sks_ref--; skc->skc_obj_alloc--; /* * Move slab to skc_partial_list when no longer full. Slabs * are added to the head to keep the partial list is quasi-full * sorted order. Fuller at the head, emptier at the tail. */ if (sks->sks_ref == (sks->sks_objs - 1)) { list_del(&sks->sks_list); list_add(&sks->sks_list, &skc->skc_partial_list); } /* * Move empty slabs to the end of the partial list so * they can be easily found and freed during reclamation. 
*/ if (sks->sks_ref == 0) { list_del(&sks->sks_list); list_add_tail(&sks->sks_list, &skc->skc_partial_list); skc->skc_slab_alloc--; } } /* * Allocate an object from the per-cpu magazine, or if the magazine * is empty directly allocate from a slab and repopulate the magazine. */ void * spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) { spl_kmem_magazine_t *skm; void *obj = NULL; ASSERT0(flags & ~KM_PUBLIC_MASK); ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); /* * Allocate directly from a Linux slab. All optimizations are left * to the underlying cache; we only need to guarantee that KM_SLEEP * callers will never fail. */ if (skc->skc_flags & KMC_SLAB) { struct kmem_cache *slc = skc->skc_linux_cache; do { obj = kmem_cache_alloc(slc, kmem_flags_convert(flags)); } while ((obj == NULL) && !(flags & KM_NOSLEEP)); if (obj != NULL) { /* * Even though we leave everything up to the * underlying cache we still keep track of * how many objects we've allocated in it for * better debuggability. */ percpu_counter_inc(&skc->skc_linux_alloc); } goto ret; } local_irq_disable(); restart: /* * Safe to update the per-cpu structure without a lock, but * in the restart case we must be careful to reacquire * the local magazine since it may have changed * while we were growing the cache. */ skm = skc->skc_mag[smp_processor_id()]; ASSERT(skm->skm_magic == SKM_MAGIC); if (likely(skm->skm_avail)) { /* Object available in CPU cache, use it */ obj = skm->skm_objs[--skm->skm_avail]; } else { obj = spl_cache_refill(skc, skm, flags); if ((obj == NULL) && !(flags & KM_NOSLEEP)) goto restart; local_irq_enable(); goto ret; } local_irq_enable(); ASSERT(obj); ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); ret: /* Pre-emptively migrate object to CPU L1 cache */ if (obj) { if (obj && skc->skc_ctor) skc->skc_ctor(obj, skc->skc_private, flags); else prefetchw(obj); } return (obj); } EXPORT_SYMBOL(spl_kmem_cache_alloc); /* * Free an object back to the local per-cpu magazine; there is no * guarantee that this is the same magazine the object was originally * allocated from. We may need to flush entries from the magazine * back to the slabs to make space. */ void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) { spl_kmem_magazine_t *skm; unsigned long flags; int do_reclaim = 0; int do_emergency = 0; ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); /* * Run the destructor */ if (skc->skc_dtor) skc->skc_dtor(obj, skc->skc_private); /* * Free the object back to the underlying Linux slab. */ if (skc->skc_flags & KMC_SLAB) { kmem_cache_free(skc->skc_linux_cache, obj); percpu_counter_dec(&skc->skc_linux_alloc); return; } /* * While a cache has outstanding emergency objects all freed objects * must be checked. However, since emergency objects will never use * a virtual address these objects can be safely excluded as an * optimization. */ if (!is_vmalloc_addr(obj)) { spin_lock(&skc->skc_lock); do_emergency = (skc->skc_obj_emergency > 0); spin_unlock(&skc->skc_lock); if (do_emergency && (spl_emergency_free(skc, obj) == 0)) return; } local_irq_save(flags); /* * Safe to update the per-cpu structure without a lock, but * since no remote memory allocation tracking is performed * it is entirely possible to allocate an object from one * CPU cache and return it to another.
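*
* A magazine is just a small per-CPU LIFO of object pointers; the free
* path below can be modelled as follows (mag_t, mag_put() and
* flush_to_slab() are illustrative names, not part of this patch):
*
*     #include <string.h>
*
*     typedef struct {
*         void *objs[256];
*         int avail;      // cached pointers currently in the magazine
*         int size;       // capacity, as in skm_size
*         int refill;     // how many to flush when full, as in skm_refill
*     } mag_t;
*
*     extern void flush_to_slab(void **objs, int count);  // assumed helper
*
*     static void mag_put(mag_t *m, void *obj)
*     {
*         if (m->avail >= m->size) {          // full: flush to make room
*             flush_to_slab(m->objs, m->refill);
*             m->avail -= m->refill;
*             memmove(m->objs, &m->objs[m->refill],
*                 sizeof (void *) * m->avail);
*         }
*         m->objs[m->avail++] = obj;
*     }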
*/ skm = skc->skc_mag[smp_processor_id()]; ASSERT(skm->skm_magic == SKM_MAGIC); /* * Per-CPU cache full, flush it to make space for this object, * this may result in an empty slab which can be reclaimed once * interrupts are re-enabled. */ if (unlikely(skm->skm_avail >= skm->skm_size)) { spl_cache_flush(skc, skm, skm->skm_refill); do_reclaim = 1; } /* Available space in cache, use it */ skm->skm_objs[skm->skm_avail++] = obj; local_irq_restore(flags); if (do_reclaim) spl_slab_reclaim(skc); } EXPORT_SYMBOL(spl_kmem_cache_free); /* * Depending on how many and which objects are released it may simply * repopulate the local magazine which will then need to age-out. Objects * which cannot fit in the magazine will be released back to their slabs * which will also need to age out before being released. This is all just * best effort and we do not want to thrash creating and destroying slabs. */ void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) { ASSERT(skc->skc_magic == SKC_MAGIC); ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); if (skc->skc_flags & KMC_SLAB) return; atomic_inc(&skc->skc_ref); /* * Prevent concurrent cache reaping when contended. */ if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) goto out; /* Reclaim from the magazine and free all now empty slabs. */ unsigned long irq_flags; local_irq_save(irq_flags); spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; spl_cache_flush(skc, skm, skm->skm_avail); local_irq_restore(irq_flags); spl_slab_reclaim(skc); clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags); smp_mb__after_atomic(); wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING); out: atomic_dec(&skc->skc_ref); } EXPORT_SYMBOL(spl_kmem_cache_reap_now); /* * This is stubbed out for code consistency with other platforms. There * is existing logic to prevent concurrent reaping so while this is ugly * it should do no harm. */ int spl_kmem_cache_reap_active(void) { return (0); } EXPORT_SYMBOL(spl_kmem_cache_reap_active); /* * Reap all free slabs from all registered caches. */ void spl_kmem_reap(void) { spl_kmem_cache_t *skc = NULL; down_read(&spl_kmem_cache_sem); list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { spl_kmem_cache_reap_now(skc); } up_read(&spl_kmem_cache_sem); } EXPORT_SYMBOL(spl_kmem_reap); int spl_kmem_cache_init(void) { init_rwsem(&spl_kmem_cache_sem); INIT_LIST_HEAD(&spl_kmem_cache_list); spl_kmem_cache_taskq = taskq_create("spl_kmem_cache", spl_kmem_cache_kmem_threads, maxclsyspri, spl_kmem_cache_kmem_threads * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); + if (spl_kmem_cache_taskq == NULL) + return (-ENOMEM); + return (0); } void spl_kmem_cache_fini(void) { taskq_destroy(spl_kmem_cache_taskq); } diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c index 0aab148975aa..3b0c29606c2e 100644 --- a/module/os/linux/spl/spl-taskq.c +++ b/module/os/linux/spl/spl-taskq.c @@ -1,1433 +1,1433 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. * * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. 
* * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . * * Solaris Porting Layer (SPL) Task Queue Implementation. */ #include #include #include #include #include #ifdef HAVE_CPU_HOTPLUG #include #endif static int spl_taskq_thread_bind = 0; module_param(spl_taskq_thread_bind, int, 0644); MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default"); static int spl_taskq_thread_dynamic = 1; module_param(spl_taskq_thread_dynamic, int, 0444); MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads"); static int spl_taskq_thread_priority = 1; module_param(spl_taskq_thread_priority, int, 0644); MODULE_PARM_DESC(spl_taskq_thread_priority, "Allow non-default priority for taskq threads"); static int spl_taskq_thread_sequential = 4; module_param(spl_taskq_thread_sequential, int, 0644); MODULE_PARM_DESC(spl_taskq_thread_sequential, "Create new taskq threads after N sequential tasks"); /* * Global system-wide dynamic task queue available for all consumers. This * taskq is not intended for long-running tasks; instead, a dedicated taskq * should be created. */ taskq_t *system_taskq; EXPORT_SYMBOL(system_taskq); /* Global dynamic task queue for long delay */ taskq_t *system_delay_taskq; EXPORT_SYMBOL(system_delay_taskq); /* Private dedicated taskq for creating new taskq threads on demand. */ static taskq_t *dynamic_taskq; static taskq_thread_t *taskq_thread_create(taskq_t *); #ifdef HAVE_CPU_HOTPLUG /* Multi-callback id for cpu hotplugging. */ static int spl_taskq_cpuhp_state; #endif /* List of all taskqs */ LIST_HEAD(tq_list); struct rw_semaphore tq_list_sem; static uint_t taskq_tsd; static int task_km_flags(uint_t flags) { if (flags & TQ_NOSLEEP) return (KM_NOSLEEP); if (flags & TQ_PUSHPAGE) return (KM_PUSHPAGE); return (KM_SLEEP); } /* * taskq_find_by_name - Find the largest instance number of a named taskq. */ static int taskq_find_by_name(const char *name) { struct list_head *tql = NULL; taskq_t *tq; list_for_each_prev(tql, &tq_list) { tq = list_entry(tql, taskq_t, tq_taskqs); if (strcmp(name, tq->tq_name) == 0) return (tq->tq_instance); } return (-1); } /* * NOTE: Must be called with tq->tq_lock held, returns a list_t which * is not attached to the free, work, or pending taskq lists. */ static taskq_ent_t * task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags) { taskq_ent_t *t; int count = 0; ASSERT(tq); retry: /* Acquire taskq_ent_t's from free list if available */ if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) { t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL)); ASSERT(!timer_pending(&t->tqent_timer)); list_del_init(&t->tqent_list); return (t); } /* Free list is empty and memory allocations are prohibited */ if (flags & TQ_NOALLOC) return (NULL); /* Hit maximum taskq_ent_t pool size */ if (tq->tq_nalloc >= tq->tq_maxalloc) { if (flags & TQ_NOSLEEP) return (NULL); /* * Sleep periodically polling the free list for an available * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed * but we cannot block forever waiting for an taskq_ent_t to * show up in the free list, otherwise a deadlock can happen. 
* * Therefore, we need to allocate a new task even if the number * of allocated tasks is above tq->tq_maxalloc, but we still * end up delaying the task allocation by one second, thereby * throttling the task dispatch rate. */ spin_unlock_irqrestore(&tq->tq_lock, *irqflags); schedule_timeout(HZ / 100); spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class); if (count < 100) { count++; goto retry; } } spin_unlock_irqrestore(&tq->tq_lock, *irqflags); t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags)); spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class); if (t) { taskq_init_ent(t); tq->tq_nalloc++; } return (t); } /* * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t * to already be removed from the free, work, or pending taskq lists. */ static void task_free(taskq_t *tq, taskq_ent_t *t) { ASSERT(tq); ASSERT(t); ASSERT(list_empty(&t->tqent_list)); ASSERT(!timer_pending(&t->tqent_timer)); kmem_free(t, sizeof (taskq_ent_t)); tq->tq_nalloc--; } /* * NOTE: Must be called with tq->tq_lock held, either destroys the * taskq_ent_t if too many exist or moves it to the free list for later use. */ static void task_done(taskq_t *tq, taskq_ent_t *t) { ASSERT(tq); ASSERT(t); /* Wake tasks blocked in taskq_wait_id() */ wake_up_all(&t->tqent_waitq); list_del_init(&t->tqent_list); if (tq->tq_nalloc <= tq->tq_minalloc) { t->tqent_id = TASKQID_INVALID; t->tqent_func = NULL; t->tqent_arg = NULL; t->tqent_flags = 0; list_add_tail(&t->tqent_list, &tq->tq_free_list); } else { task_free(tq, t); } } /* * When a delayed task timer expires remove it from the delay list and * add it to the priority list in order for immediate processing. */ static void task_expire_impl(taskq_ent_t *t) { taskq_ent_t *w; taskq_t *tq = t->tqent_taskq; struct list_head *l = NULL; unsigned long flags; spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); if (t->tqent_flags & TQENT_FLAG_CANCEL) { ASSERT(list_empty(&t->tqent_list)); spin_unlock_irqrestore(&tq->tq_lock, flags); return; } t->tqent_birth = jiffies; DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); /* * The priority list must be maintained in strict task id order * from lowest to highest for lowest_id to be easily calculable. */ list_del(&t->tqent_list); list_for_each_prev(l, &tq->tq_prio_list) { w = list_entry(l, taskq_ent_t, tqent_list); if (w->tqent_id < t->tqent_id) { list_add(&t->tqent_list, l); break; } } if (l == &tq->tq_prio_list) list_add(&t->tqent_list, &tq->tq_prio_list); spin_unlock_irqrestore(&tq->tq_lock, flags); wake_up(&tq->tq_work_waitq); } static void task_expire(spl_timer_list_t tl) { struct timer_list *tmr = (struct timer_list *)tl; taskq_ent_t *t = from_timer(t, tmr, tqent_timer); task_expire_impl(t); } /* * Returns the lowest incomplete taskqid_t. The taskqid_t may * be queued on the pending list, on the priority list, on the * delay list, or on the work list currently being handled, but * it is not 100% complete yet. 
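 *
 * Conceptually (an illustrative summary of the function below, not a
 * new algorithm), the result is the minimum of tq_next_id and the id
 * at the head of each list, where head() is shorthand for the id of
 * the first entry on that (sorted) list:
 *
 *	lowest_id = MIN(tq_next_id, head(tq_pend_list),
 *	    head(tq_prio_list), head(tq_delay_list),
 *	    head(tq_active_list));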
*/ static taskqid_t taskq_lowest_id(taskq_t *tq) { taskqid_t lowest_id = tq->tq_next_id; taskq_ent_t *t; taskq_thread_t *tqt; if (!list_empty(&tq->tq_pend_list)) { t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list); lowest_id = MIN(lowest_id, t->tqent_id); } if (!list_empty(&tq->tq_prio_list)) { t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list); lowest_id = MIN(lowest_id, t->tqent_id); } if (!list_empty(&tq->tq_delay_list)) { t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list); lowest_id = MIN(lowest_id, t->tqent_id); } if (!list_empty(&tq->tq_active_list)) { tqt = list_entry(tq->tq_active_list.next, taskq_thread_t, tqt_active_list); ASSERT(tqt->tqt_id != TASKQID_INVALID); lowest_id = MIN(lowest_id, tqt->tqt_id); } return (lowest_id); } /* * Insert a task into a list keeping the list sorted by increasing taskqid. */ static void taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt) { taskq_thread_t *w; struct list_head *l = NULL; ASSERT(tq); ASSERT(tqt); list_for_each_prev(l, &tq->tq_active_list) { w = list_entry(l, taskq_thread_t, tqt_active_list); if (w->tqt_id < tqt->tqt_id) { list_add(&tqt->tqt_active_list, l); break; } } if (l == &tq->tq_active_list) list_add(&tqt->tqt_active_list, &tq->tq_active_list); } /* * Find and return a task from the given list if it exists. The list * must be in lowest to highest task id order. */ static taskq_ent_t * taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id) { struct list_head *l = NULL; taskq_ent_t *t; list_for_each(l, lh) { t = list_entry(l, taskq_ent_t, tqent_list); if (t->tqent_id == id) return (t); if (t->tqent_id > id) break; } return (NULL); } /* * Find an already dispatched task given the task id regardless of what * state it is in. If a task is still pending it will be returned. * If a task is executing, then -EBUSY will be returned instead. * If the task has already been run then NULL is returned. */ static taskq_ent_t * taskq_find(taskq_t *tq, taskqid_t id) { taskq_thread_t *tqt; struct list_head *l = NULL; taskq_ent_t *t; t = taskq_find_list(tq, &tq->tq_delay_list, id); if (t) return (t); t = taskq_find_list(tq, &tq->tq_prio_list, id); if (t) return (t); t = taskq_find_list(tq, &tq->tq_pend_list, id); if (t) return (t); list_for_each(l, &tq->tq_active_list) { tqt = list_entry(l, taskq_thread_t, tqt_active_list); if (tqt->tqt_id == id) { /* * Instead of returning tqt_task, we just return a non * NULL value to prevent misuse, since tqt_task only * has two valid fields. */ return (ERR_PTR(-EBUSY)); } } return (NULL); } /* * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and * taskq_wait() functions below. * * Taskq waiting is accomplished by tracking the lowest outstanding task * id and the next available task id. As tasks are dispatched they are * added to the tail of the pending, priority, or delay lists. As worker * threads become available the tasks are removed from the heads of these * lists and linked to the worker threads. This ensures the lists are * kept sorted by lowest to highest task id. * * Therefore the lowest outstanding task id can be quickly determined by * checking the head item from all of these lists. This value is stored * with the taskq as the lowest id. It only needs to be recalculated when * either the task with the current lowest id completes or is canceled. * * By blocking until the lowest task id exceeds the passed task id the * taskq_wait_outstanding() function can be easily implemented. 
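 *
 * For example (an illustrative sketch, not part of this change; func
 * and arg are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, func, arg, TQ_SLEEP);
 *	...
 *	taskq_wait_outstanding(tq, id);
 *
 * The taskq_wait_outstanding() call returns once tq_lowest_id has
 * advanced beyond 'id', that is, once the dispatched task and every
 * task with a smaller id have completed.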
Similarly, * by blocking until the lowest task id matches the next task id taskq_wait() * can be implemented. * * Callers should be aware that when there are multiple worked threads it * is possible for larger task ids to complete before smaller ones. Also * when the taskq contains delay tasks with small task ids callers may * block for a considerable length of time waiting for them to expire and * execute. */ static int taskq_wait_id_check(taskq_t *tq, taskqid_t id) { int rc; unsigned long flags; spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); rc = (taskq_find(tq, id) == NULL); spin_unlock_irqrestore(&tq->tq_lock, flags); return (rc); } /* * The taskq_wait_id() function blocks until the passed task id completes. * This does not guarantee that all lower task ids have completed. */ void taskq_wait_id(taskq_t *tq, taskqid_t id) { wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id)); } EXPORT_SYMBOL(taskq_wait_id); static int taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id) { int rc; unsigned long flags; spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); rc = (id < tq->tq_lowest_id); spin_unlock_irqrestore(&tq->tq_lock, flags); return (rc); } /* * The taskq_wait_outstanding() function will block until all tasks with a * lower taskqid than the passed 'id' have been completed. Note that all * task id's are assigned monotonically at dispatch time. Zero may be * passed for the id to indicate all tasks dispatch up to this point, * but not after, should be waited for. */ void taskq_wait_outstanding(taskq_t *tq, taskqid_t id) { id = id ? id : tq->tq_next_id - 1; wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id)); } EXPORT_SYMBOL(taskq_wait_outstanding); static int taskq_wait_check(taskq_t *tq) { int rc; unsigned long flags; spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); rc = (tq->tq_lowest_id == tq->tq_next_id); spin_unlock_irqrestore(&tq->tq_lock, flags); return (rc); } /* * The taskq_wait() function will block until the taskq is empty. * This means that if a taskq re-dispatches work to itself taskq_wait() * callers will block indefinitely. */ void taskq_wait(taskq_t *tq) { wait_event(tq->tq_wait_waitq, taskq_wait_check(tq)); } EXPORT_SYMBOL(taskq_wait); int taskq_member(taskq_t *tq, kthread_t *t) { return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t)); } EXPORT_SYMBOL(taskq_member); taskq_t * taskq_of_curthread(void) { return (tsd_get(taskq_tsd)); } EXPORT_SYMBOL(taskq_of_curthread); /* * Cancel an already dispatched task given the task id. Still pending tasks * will be immediately canceled, and if the task is active the function will * block until it completes. Preallocated tasks which are canceled must be * freed by the caller. */ int taskq_cancel_id(taskq_t *tq, taskqid_t id) { taskq_ent_t *t; int rc = ENOENT; unsigned long flags; ASSERT(tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); t = taskq_find(tq, id); if (t && t != ERR_PTR(-EBUSY)) { list_del_init(&t->tqent_list); t->tqent_flags |= TQENT_FLAG_CANCEL; /* * When canceling the lowest outstanding task id we * must recalculate the new lowest outstanding id. */ if (tq->tq_lowest_id == t->tqent_id) { tq->tq_lowest_id = taskq_lowest_id(tq); ASSERT3S(tq->tq_lowest_id, >, t->tqent_id); } /* * The task_expire() function takes the tq->tq_lock so drop * drop the lock before synchronously cancelling the timer. 
*/ if (timer_pending(&t->tqent_timer)) { spin_unlock_irqrestore(&tq->tq_lock, flags); del_timer_sync(&t->tqent_timer); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); } if (!(t->tqent_flags & TQENT_FLAG_PREALLOC)) task_done(tq, t); rc = 0; } spin_unlock_irqrestore(&tq->tq_lock, flags); if (t == ERR_PTR(-EBUSY)) { taskq_wait_id(tq, id); rc = EBUSY; } return (rc); } EXPORT_SYMBOL(taskq_cancel_id); static int taskq_thread_spawn(taskq_t *tq); taskqid_t taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) { taskq_ent_t *t; taskqid_t rc = TASKQID_INVALID; unsigned long irqflags; ASSERT(tq); ASSERT(func); spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); /* Taskq being destroyed and all tasks drained */ if (!(tq->tq_flags & TASKQ_ACTIVE)) goto out; /* Do not queue the task unless there is idle thread for it */ ASSERT(tq->tq_nactive <= tq->tq_nthreads); if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { /* Dynamic taskq may be able to spawn another thread */ if (!(tq->tq_flags & TASKQ_DYNAMIC) || taskq_thread_spawn(tq) == 0) goto out; } if ((t = task_alloc(tq, flags, &irqflags)) == NULL) goto out; spin_lock(&t->tqent_lock); /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */ if (flags & TQ_NOQUEUE) list_add(&t->tqent_list, &tq->tq_prio_list); /* Queue to the priority list instead of the pending list */ else if (flags & TQ_FRONT) list_add_tail(&t->tqent_list, &tq->tq_prio_list); else list_add_tail(&t->tqent_list, &tq->tq_pend_list); t->tqent_id = rc = tq->tq_next_id; tq->tq_next_id++; t->tqent_func = func; t->tqent_arg = arg; t->tqent_taskq = tq; t->tqent_timer.function = NULL; t->tqent_timer.expires = 0; t->tqent_birth = jiffies; DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); spin_unlock(&t->tqent_lock); wake_up(&tq->tq_work_waitq); out: /* Spawn additional taskq threads if required. */ if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads) (void) taskq_thread_spawn(tq); spin_unlock_irqrestore(&tq->tq_lock, irqflags); return (rc); } EXPORT_SYMBOL(taskq_dispatch); taskqid_t taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, uint_t flags, clock_t expire_time) { taskqid_t rc = TASKQID_INVALID; taskq_ent_t *t; unsigned long irqflags; ASSERT(tq); ASSERT(func); spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); /* Taskq being destroyed and all tasks drained */ if (!(tq->tq_flags & TASKQ_ACTIVE)) goto out; if ((t = task_alloc(tq, flags, &irqflags)) == NULL) goto out; spin_lock(&t->tqent_lock); /* Queue to the delay list for subsequent execution */ list_add_tail(&t->tqent_list, &tq->tq_delay_list); t->tqent_id = rc = tq->tq_next_id; tq->tq_next_id++; t->tqent_func = func; t->tqent_arg = arg; t->tqent_taskq = tq; t->tqent_timer.function = task_expire; t->tqent_timer.expires = (unsigned long)expire_time; add_timer(&t->tqent_timer); ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); spin_unlock(&t->tqent_lock); out: /* Spawn additional taskq threads if required. 
*/ if (tq->tq_nactive == tq->tq_nthreads) (void) taskq_thread_spawn(tq); spin_unlock_irqrestore(&tq->tq_lock, irqflags); return (rc); } EXPORT_SYMBOL(taskq_dispatch_delay); void taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, taskq_ent_t *t) { unsigned long irqflags; ASSERT(tq); ASSERT(func); spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); /* Taskq being destroyed and all tasks drained */ if (!(tq->tq_flags & TASKQ_ACTIVE)) { t->tqent_id = TASKQID_INVALID; goto out; } if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) { /* Dynamic taskq may be able to spawn another thread */ if (!(tq->tq_flags & TASKQ_DYNAMIC) || taskq_thread_spawn(tq) == 0) goto out2; flags |= TQ_FRONT; } spin_lock(&t->tqent_lock); /* * Make sure the entry is not on some other taskq; it is important to * ASSERT() under lock */ ASSERT(taskq_empty_ent(t)); /* * Mark it as a prealloc'd task. This is important * to ensure that we don't free it later. */ t->tqent_flags |= TQENT_FLAG_PREALLOC; /* Queue to the priority list instead of the pending list */ if (flags & TQ_FRONT) list_add_tail(&t->tqent_list, &tq->tq_prio_list); else list_add_tail(&t->tqent_list, &tq->tq_pend_list); t->tqent_id = tq->tq_next_id; tq->tq_next_id++; t->tqent_func = func; t->tqent_arg = arg; t->tqent_taskq = tq; t->tqent_birth = jiffies; DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t); spin_unlock(&t->tqent_lock); wake_up(&tq->tq_work_waitq); out: /* Spawn additional taskq threads if required. */ if (tq->tq_nactive == tq->tq_nthreads) (void) taskq_thread_spawn(tq); out2: spin_unlock_irqrestore(&tq->tq_lock, irqflags); } EXPORT_SYMBOL(taskq_dispatch_ent); int taskq_empty_ent(taskq_ent_t *t) { return (list_empty(&t->tqent_list)); } EXPORT_SYMBOL(taskq_empty_ent); void taskq_init_ent(taskq_ent_t *t) { spin_lock_init(&t->tqent_lock); init_waitqueue_head(&t->tqent_waitq); timer_setup(&t->tqent_timer, NULL, 0); INIT_LIST_HEAD(&t->tqent_list); t->tqent_id = 0; t->tqent_func = NULL; t->tqent_arg = NULL; t->tqent_flags = 0; t->tqent_taskq = NULL; } EXPORT_SYMBOL(taskq_init_ent); /* * Return the next pending task, preference is given to tasks on the * priority list which were dispatched with TQ_FRONT. */ static taskq_ent_t * taskq_next_ent(taskq_t *tq) { struct list_head *list; if (!list_empty(&tq->tq_prio_list)) list = &tq->tq_prio_list; else if (!list_empty(&tq->tq_pend_list)) list = &tq->tq_pend_list; else return (NULL); return (list_entry(list->next, taskq_ent_t, tqent_list)); } /* * Spawns a new thread for the specified taskq. */ static void taskq_thread_spawn_task(void *arg) { taskq_t *tq = (taskq_t *)arg; unsigned long flags; if (taskq_thread_create(tq) == NULL) { /* restore spawning count if failed */ spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); tq->tq_nspawn--; spin_unlock_irqrestore(&tq->tq_lock, flags); } } /* * Spawn addition threads for dynamic taskqs (TASKQ_DYNAMIC) the current * number of threads is insufficient to handle the pending tasks. These * new threads must be created by the dedicated dynamic_taskq to avoid * deadlocks between thread creation and memory reclaim. The system_taskq * which is also a dynamic taskq cannot be safely used for this. 
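 *
 * Spawning is throttled by tq_nspawn (an illustrative restatement of
 * the check below, not new behavior): a new worker is only requested
 * while
 *
 *	tq_nthreads + tq_nspawn < tq_maxthreads
 *
 * so at most tq_maxthreads workers ever exist even when several
 * dispatches race to spawn threads.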
*/ static int taskq_thread_spawn(taskq_t *tq) { int spawning = 0; if (!(tq->tq_flags & TASKQ_DYNAMIC)) return (0); if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) && (tq->tq_flags & TASKQ_ACTIVE)) { spawning = (++tq->tq_nspawn); taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task, tq, TQ_NOSLEEP); } return (spawning); } /* * Threads in a dynamic taskq should only exit once it has been completely * drained and no other threads are actively servicing tasks. This prevents * threads from being created and destroyed more than is required. * * The first thread is the thread list is treated as the primary thread. * There is nothing special about the primary thread but in order to avoid * all the taskq pids from changing we opt to make it long running. */ static int taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt) { if (!(tq->tq_flags & TASKQ_DYNAMIC)) return (0); if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t, tqt_thread_list) == tqt) return (0); return ((tq->tq_nspawn == 0) && /* No threads are being spawned */ (tq->tq_nactive == 0) && /* No threads are handling tasks */ (tq->tq_nthreads > 1) && /* More than 1 thread is running */ (!taskq_next_ent(tq)) && /* There are no pending tasks */ (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */ } static int taskq_thread(void *args) { DECLARE_WAITQUEUE(wait, current); sigset_t blocked; taskq_thread_t *tqt = args; taskq_t *tq; taskq_ent_t *t; int seq_tasks = 0; unsigned long flags; taskq_ent_t dup_task = {}; ASSERT(tqt); ASSERT(tqt->tqt_tq); tq = tqt->tqt_tq; current->flags |= PF_NOFREEZE; (void) spl_fstrans_mark(); sigfillset(&blocked); sigprocmask(SIG_BLOCK, &blocked, NULL); flush_signals(current); tsd_set(taskq_tsd, tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); /* * If we are dynamically spawned, decrease spawning count. Note that * we could be created during taskq_create, in which case we shouldn't * do the decrement. But it's fine because taskq_create will reset * tq_nspawn later. */ if (tq->tq_flags & TASKQ_DYNAMIC) tq->tq_nspawn--; /* Immediately exit if more threads than allowed were created. */ if (tq->tq_nthreads >= tq->tq_maxthreads) goto error; tq->tq_nthreads++; list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list); wake_up(&tq->tq_wait_waitq); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { if (list_empty(&tq->tq_pend_list) && list_empty(&tq->tq_prio_list)) { if (taskq_thread_should_stop(tq, tqt)) { wake_up_all(&tq->tq_wait_waitq); break; } add_wait_queue_exclusive(&tq->tq_work_waitq, &wait); spin_unlock_irqrestore(&tq->tq_lock, flags); schedule(); seq_tasks = 0; spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); remove_wait_queue(&tq->tq_work_waitq, &wait); } else { __set_current_state(TASK_RUNNING); } if ((t = taskq_next_ent(tq)) != NULL) { list_del_init(&t->tqent_list); /* * A TQENT_FLAG_PREALLOC task may be reused or freed * during the task function call. Store tqent_id and * tqent_flags here. * * Also use an on stack taskq_ent_t for tqt_task * assignment in this case; we want to make sure * to duplicate all fields, so the values are * correct when it's accessed via DTRACE_PROBE*. 
*/ tqt->tqt_id = t->tqent_id; tqt->tqt_flags = t->tqent_flags; if (t->tqent_flags & TQENT_FLAG_PREALLOC) { dup_task = *t; t = &dup_task; } tqt->tqt_task = t; taskq_insert_in_order(tq, tqt); tq->tq_nactive++; spin_unlock_irqrestore(&tq->tq_lock, flags); DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t); /* Perform the requested task */ t->tqent_func(t->tqent_arg); DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); tq->tq_nactive--; list_del_init(&tqt->tqt_active_list); tqt->tqt_task = NULL; /* For prealloc'd tasks, we don't free anything. */ if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC)) task_done(tq, t); /* * When the current lowest outstanding taskqid is * done calculate the new lowest outstanding id */ if (tq->tq_lowest_id == tqt->tqt_id) { tq->tq_lowest_id = taskq_lowest_id(tq); ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id); } /* Spawn additional taskq threads if required. */ if ((++seq_tasks) > spl_taskq_thread_sequential && taskq_thread_spawn(tq)) seq_tasks = 0; tqt->tqt_id = TASKQID_INVALID; tqt->tqt_flags = 0; wake_up_all(&tq->tq_wait_waitq); } else { if (taskq_thread_should_stop(tq, tqt)) break; } set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); tq->tq_nthreads--; list_del_init(&tqt->tqt_thread_list); error: kmem_free(tqt, sizeof (taskq_thread_t)); spin_unlock_irqrestore(&tq->tq_lock, flags); tsd_set(taskq_tsd, NULL); thread_exit(); return (0); } static taskq_thread_t * taskq_thread_create(taskq_t *tq) { static int last_used_cpu = 0; taskq_thread_t *tqt; tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE); INIT_LIST_HEAD(&tqt->tqt_thread_list); INIT_LIST_HEAD(&tqt->tqt_active_list); tqt->tqt_tq = tq; tqt->tqt_id = TASKQID_INVALID; tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt, "%s", tq->tq_name); if (tqt->tqt_thread == NULL) { kmem_free(tqt, sizeof (taskq_thread_t)); return (NULL); } if (spl_taskq_thread_bind) { last_used_cpu = (last_used_cpu + 1) % num_online_cpus(); kthread_bind(tqt->tqt_thread, last_used_cpu); } if (spl_taskq_thread_priority) set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri)); wake_up_process(tqt->tqt_thread); return (tqt); } taskq_t * taskq_create(const char *name, int threads_arg, pri_t pri, int minalloc, int maxalloc, uint_t flags) { taskq_t *tq; taskq_thread_t *tqt; int count = 0, rc = 0, i; unsigned long irqflags; int nthreads = threads_arg; ASSERT(name != NULL); ASSERT(minalloc >= 0); ASSERT(maxalloc <= INT_MAX); ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */ /* Scale the number of threads using nthreads as a percentage */ if (flags & TASKQ_THREADS_CPU_PCT) { ASSERT(nthreads <= 100); ASSERT(nthreads >= 0); nthreads = MIN(threads_arg, 100); nthreads = MAX(nthreads, 0); nthreads = MAX((num_online_cpus() * nthreads) /100, 1); } tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE); if (tq == NULL) return (NULL); tq->tq_hp_support = B_FALSE; #ifdef HAVE_CPU_HOTPLUG if (flags & TASKQ_THREADS_CPU_PCT) { tq->tq_hp_support = B_TRUE; if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state, &tq->tq_hp_cb_node) != 0) { kmem_free(tq, sizeof (*tq)); return (NULL); } } #endif spin_lock_init(&tq->tq_lock); INIT_LIST_HEAD(&tq->tq_thread_list); INIT_LIST_HEAD(&tq->tq_active_list); tq->tq_name = kmem_strdup(name); tq->tq_nactive = 0; tq->tq_nthreads = 0; tq->tq_nspawn = 0; tq->tq_maxthreads = nthreads; tq->tq_cpu_pct = threads_arg; tq->tq_pri = pri; tq->tq_minalloc = minalloc; tq->tq_maxalloc = maxalloc; tq->tq_nalloc = 0; tq->tq_flags = (flags | TASKQ_ACTIVE); tq->tq_next_id = 
TASKQID_INITIAL; tq->tq_lowest_id = TASKQID_INITIAL; INIT_LIST_HEAD(&tq->tq_free_list); INIT_LIST_HEAD(&tq->tq_pend_list); INIT_LIST_HEAD(&tq->tq_prio_list); INIT_LIST_HEAD(&tq->tq_delay_list); init_waitqueue_head(&tq->tq_work_waitq); init_waitqueue_head(&tq->tq_wait_waitq); tq->tq_lock_class = TQ_LOCK_GENERAL; INIT_LIST_HEAD(&tq->tq_taskqs); if (flags & TASKQ_PREPOPULATE) { spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class); for (i = 0; i < minalloc; i++) task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW, &irqflags)); spin_unlock_irqrestore(&tq->tq_lock, irqflags); } if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) nthreads = 1; for (i = 0; i < nthreads; i++) { tqt = taskq_thread_create(tq); if (tqt == NULL) rc = 1; else count++; } /* Wait for all threads to be started before potential destroy */ wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count); /* * taskq_thread might have touched nspawn, but we don't want them to * because they're not dynamically spawned. So we reset it to 0 */ tq->tq_nspawn = 0; if (rc) { taskq_destroy(tq); tq = NULL; } else { down_write(&tq_list_sem); tq->tq_instance = taskq_find_by_name(name) + 1; list_add_tail(&tq->tq_taskqs, &tq_list); up_write(&tq_list_sem); } return (tq); } EXPORT_SYMBOL(taskq_create); void taskq_destroy(taskq_t *tq) { struct task_struct *thread; taskq_thread_t *tqt; taskq_ent_t *t; unsigned long flags; ASSERT(tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); tq->tq_flags &= ~TASKQ_ACTIVE; spin_unlock_irqrestore(&tq->tq_lock, flags); #ifdef HAVE_CPU_HOTPLUG if (tq->tq_hp_support) { VERIFY0(cpuhp_state_remove_instance_nocalls( spl_taskq_cpuhp_state, &tq->tq_hp_cb_node)); } #endif /* * When TASKQ_ACTIVE is clear new tasks may not be added nor may * new worker threads be spawned for dynamic taskq. */ if (dynamic_taskq != NULL) taskq_wait_outstanding(dynamic_taskq, 0); taskq_wait(tq); /* remove taskq from global list used by the kstats */ down_write(&tq_list_sem); list_del(&tq->tq_taskqs); up_write(&tq_list_sem); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); /* wait for spawning threads to insert themselves to the list */ while (tq->tq_nspawn) { spin_unlock_irqrestore(&tq->tq_lock, flags); schedule_timeout_interruptible(1); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); } /* * Signal each thread to exit and block until it does. Each thread * is responsible for removing itself from the list and freeing its * taskq_thread_t. This allows for idle threads to opt to remove * themselves from the taskq. They can be recreated as needed. 
*/ while (!list_empty(&tq->tq_thread_list)) { tqt = list_entry(tq->tq_thread_list.next, taskq_thread_t, tqt_thread_list); thread = tqt->tqt_thread; spin_unlock_irqrestore(&tq->tq_lock, flags); kthread_stop(thread); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); } while (!list_empty(&tq->tq_free_list)) { t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list); ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC)); list_del_init(&t->tqent_list); task_free(tq, t); } ASSERT0(tq->tq_nthreads); ASSERT0(tq->tq_nalloc); ASSERT0(tq->tq_nspawn); ASSERT(list_empty(&tq->tq_thread_list)); ASSERT(list_empty(&tq->tq_active_list)); ASSERT(list_empty(&tq->tq_free_list)); ASSERT(list_empty(&tq->tq_pend_list)); ASSERT(list_empty(&tq->tq_prio_list)); ASSERT(list_empty(&tq->tq_delay_list)); spin_unlock_irqrestore(&tq->tq_lock, flags); kmem_strfree(tq->tq_name); kmem_free(tq, sizeof (taskq_t)); } EXPORT_SYMBOL(taskq_destroy); static unsigned int spl_taskq_kick = 0; /* * 2.6.36 API Change * module_param_cb is introduced to take kernel_param_ops and * module_param_call is marked as obsolete. Also set and get operations * were changed to take a 'const struct kernel_param *'. */ static int #ifdef module_param_cb param_set_taskq_kick(const char *val, const struct kernel_param *kp) #else param_set_taskq_kick(const char *val, struct kernel_param *kp) #endif { int ret; taskq_t *tq = NULL; taskq_ent_t *t; unsigned long flags; ret = param_set_uint(val, kp); if (ret < 0 || !spl_taskq_kick) return (ret); /* reset value */ spl_taskq_kick = 0; down_read(&tq_list_sem); list_for_each_entry(tq, &tq_list, tq_taskqs) { spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); /* Check if the first pending is older than 5 seconds */ t = taskq_next_ent(tq); if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) { (void) taskq_thread_spawn(tq); printk(KERN_INFO "spl: Kicked taskq %s/%d\n", tq->tq_name, tq->tq_instance); } spin_unlock_irqrestore(&tq->tq_lock, flags); } up_read(&tq_list_sem); return (ret); } #ifdef module_param_cb static const struct kernel_param_ops param_ops_taskq_kick = { .set = param_set_taskq_kick, .get = param_get_uint, }; module_param_cb(spl_taskq_kick, ¶m_ops_taskq_kick, &spl_taskq_kick, 0644); #else module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint, &spl_taskq_kick, 0644); #endif MODULE_PARM_DESC(spl_taskq_kick, "Write nonzero to kick stuck taskqs to spawn more threads"); #ifdef HAVE_CPU_HOTPLUG /* * This callback will be called exactly once for each core that comes online, * for each dynamic taskq. We attempt to expand taskqs that have * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every * time, to correctly determine whether or not to add a thread. 
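 *
 * Worked example (illustrative numbers only): a TASKQ_THREADS_CPU_PCT
 * taskq created with tq_cpu_pct = 100 on a 4-CPU system starts with
 * MAX((4 * 100) / 100, 1) = 4 threads.  When a fifth CPU comes online
 * the callback recomputes the target as MAX(((4 + 1) * 100) / 100, 1)
 * = 5, raises tq_maxthreads accordingly, and creates one additional
 * thread unless the taskq already grows its thread pool dynamically.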
*/ static int spl_taskq_expand(unsigned int cpu, struct hlist_node *node) { taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node); unsigned long flags; int err = 0; ASSERT(tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); if (!(tq->tq_flags & TASKQ_ACTIVE)) { spin_unlock_irqrestore(&tq->tq_lock, flags); return (err); } ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT); int nthreads = MIN(tq->tq_cpu_pct, 100); nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1); tq->tq_maxthreads = nthreads; if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) && tq->tq_maxthreads > tq->tq_nthreads) { spin_unlock_irqrestore(&tq->tq_lock, flags); taskq_thread_t *tqt = taskq_thread_create(tq); if (tqt == NULL) err = -1; return (err); } spin_unlock_irqrestore(&tq->tq_lock, flags); return (err); } /* * While we don't support offlining CPUs, it is possible that CPUs will fail * to online successfully. We do need to be able to handle this case * gracefully. */ static int spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node) { taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node); unsigned long flags; ASSERT(tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); if (!(tq->tq_flags & TASKQ_ACTIVE)) goto out; ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT); int nthreads = MIN(tq->tq_cpu_pct, 100); nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1); tq->tq_maxthreads = nthreads; if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) && tq->tq_maxthreads < tq->tq_nthreads) { ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1); taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next, taskq_thread_t, tqt_thread_list); struct task_struct *thread = tqt->tqt_thread; spin_unlock_irqrestore(&tq->tq_lock, flags); kthread_stop(thread); return (0); } out: spin_unlock_irqrestore(&tq->tq_lock, flags); return (0); } #endif int spl_taskq_init(void) { init_rwsem(&tq_list_sem); tsd_create(&taskq_tsd, NULL); #ifdef HAVE_CPU_HOTPLUG spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down); #endif system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64), maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC); if (system_taskq == NULL) - return (1); + return (-ENOMEM); system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4), maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC); if (system_delay_taskq == NULL) { #ifdef HAVE_CPU_HOTPLUG cpuhp_remove_multi_state(spl_taskq_cpuhp_state); #endif taskq_destroy(system_taskq); - return (1); + return (-ENOMEM); } dynamic_taskq = taskq_create("spl_dynamic_taskq", 1, maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE); if (dynamic_taskq == NULL) { #ifdef HAVE_CPU_HOTPLUG cpuhp_remove_multi_state(spl_taskq_cpuhp_state); #endif taskq_destroy(system_taskq); taskq_destroy(system_delay_taskq); - return (1); + return (-ENOMEM); } /* * This is used to annotate tq_lock, so * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch * does not trigger a lockdep warning re: possible recursive locking */ dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC; return (0); } void spl_taskq_fini(void) { taskq_destroy(dynamic_taskq); dynamic_taskq = NULL; taskq_destroy(system_delay_taskq); system_delay_taskq = NULL; taskq_destroy(system_taskq); system_taskq = NULL; tsd_destroy(&taskq_tsd); #ifdef HAVE_CPU_HOTPLUG cpuhp_remove_multi_state(spl_taskq_cpuhp_state); spl_taskq_cpuhp_state = 0; #endif } diff --git 
a/module/os/linux/spl/spl-tsd.c b/module/os/linux/spl/spl-tsd.c index 546db9ab8bd7..389c9d0d6df3 100644 --- a/module/os/linux/spl/spl-tsd.c +++ b/module/os/linux/spl/spl-tsd.c @@ -1,719 +1,719 @@ /* * Copyright (C) 2010 Lawrence Livermore National Security, LLC. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. * * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . * * * Solaris Porting Layer (SPL) Thread Specific Data Implementation. * * Thread specific data has been implemented using a hash table, which avoids * the need to add a member to the task structure and allows maximum * portability between kernels. This implementation has been optimized * to keep the tsd_set() and tsd_get() times as small as possible. * * The majority of the entries in the hash table are for specific tsd * entries. These entries are hashed by the product of their key and * pid because by design the key and pid are guaranteed to be unique. * Their product also has the desirable property that it will be uniformly * distributed over the hash bins provided neither the pid nor key is zero. * Under Linux the zero pid is always the init process and thus won't be * used, and this implementation is careful never to assign a zero key. * By default the hash table is sized to 512 bins which is expected to * be sufficient for light to moderate usage of thread specific data. * * The hash table contains two additional types of entries. The first * type is called a 'key' entry and it is added to the hash during * tsd_create(). It is used to store the address of the destructor function * and it is used as an anchor point. All tsd entries which use the same * key will be linked to this entry. This is used during tsd_destroy() to * quickly call the destructor function for all tsd associated with the key. * The 'key' entry may be looked up with tsd_hash_search() by passing the * key you wish to look up and the DTOR_PID constant as the pid. * * The second type of entry is called a 'pid' entry and it is added to the * hash the first time a process sets a key. The 'pid' entry is also used * as an anchor and all tsd for the process will be linked to it. This * list is used during tsd_exit() to ensure all registered destructors * are run for the process. The 'pid' entry may be looked up with * tsd_hash_search() by passing the PID_KEY constant as the key, and * the process pid. Note that tsd_exit() is called by thread_exit() * so if you're using the Solaris thread API you should not need to call * tsd_exit() directly.
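 *
 * A minimal usage sketch (illustrative only, not part of this change;
 * my_key, my_dtor and value are hypothetical names). A key is created
 * once with tsd_create(), each thread may then attach and read its own
 * value, and tsd_destroy() removes the key and its data for all
 * threads:
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_dtor);
 *	tsd_set(my_key, value);
 *	value = tsd_get(my_key);
 *	tsd_destroy(&my_key);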
* */ #include #include #include #include typedef struct tsd_hash_bin { spinlock_t hb_lock; struct hlist_head hb_head; } tsd_hash_bin_t; typedef struct tsd_hash_table { spinlock_t ht_lock; uint_t ht_bits; uint_t ht_key; tsd_hash_bin_t *ht_bins; } tsd_hash_table_t; typedef struct tsd_hash_entry { uint_t he_key; pid_t he_pid; dtor_func_t he_dtor; void *he_value; struct hlist_node he_list; struct list_head he_key_list; struct list_head he_pid_list; } tsd_hash_entry_t; static tsd_hash_table_t *tsd_hash_table = NULL; /* * tsd_hash_search - searches hash table for tsd_hash_entry * @table: hash table * @key: search key * @pid: search pid */ static tsd_hash_entry_t * tsd_hash_search(tsd_hash_table_t *table, uint_t key, pid_t pid) { struct hlist_node *node = NULL; tsd_hash_entry_t *entry; tsd_hash_bin_t *bin; ulong_t hash; hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits); bin = &table->ht_bins[hash]; spin_lock(&bin->hb_lock); hlist_for_each(node, &bin->hb_head) { entry = list_entry(node, tsd_hash_entry_t, he_list); if ((entry->he_key == key) && (entry->he_pid == pid)) { spin_unlock(&bin->hb_lock); return (entry); } } spin_unlock(&bin->hb_lock); return (NULL); } /* * tsd_hash_dtor - call the destructor and free all entries on the list * @work: list of hash entries * * For a list of entries which have all already been removed from the * hash call their registered destructor then free the associated memory. */ static void tsd_hash_dtor(struct hlist_head *work) { tsd_hash_entry_t *entry; while (!hlist_empty(work)) { entry = hlist_entry(work->first, tsd_hash_entry_t, he_list); hlist_del(&entry->he_list); if (entry->he_dtor && entry->he_pid != DTOR_PID) entry->he_dtor(entry->he_value); kmem_free(entry, sizeof (tsd_hash_entry_t)); } } /* * tsd_hash_add - adds an entry to hash table * @table: hash table * @key: search key * @pid: search pid * * The caller is responsible for ensuring the unique key/pid do not * already exist in the hash table. This possible because all entries * are thread specific thus a concurrent thread will never attempt to * add this key/pid. Because multiple bins must be checked to add * links to the dtor and pid entries the entire table is locked. 
*/ static int tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value) { tsd_hash_entry_t *entry, *dtor_entry, *pid_entry; tsd_hash_bin_t *bin; ulong_t hash; int rc = 0; ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL); /* New entry allocate structure, set value, and add to hash */ entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE); if (entry == NULL) return (ENOMEM); entry->he_key = key; entry->he_pid = pid; entry->he_value = value; INIT_HLIST_NODE(&entry->he_list); INIT_LIST_HEAD(&entry->he_key_list); INIT_LIST_HEAD(&entry->he_pid_list); spin_lock(&table->ht_lock); /* Destructor entry must exist for all valid keys */ dtor_entry = tsd_hash_search(table, entry->he_key, DTOR_PID); ASSERT3P(dtor_entry, !=, NULL); entry->he_dtor = dtor_entry->he_dtor; /* Process entry must exist for all valid processes */ pid_entry = tsd_hash_search(table, PID_KEY, entry->he_pid); ASSERT3P(pid_entry, !=, NULL); hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits); bin = &table->ht_bins[hash]; spin_lock(&bin->hb_lock); /* Add to the hash, key, and pid lists */ hlist_add_head(&entry->he_list, &bin->hb_head); list_add(&entry->he_key_list, &dtor_entry->he_key_list); list_add(&entry->he_pid_list, &pid_entry->he_pid_list); spin_unlock(&bin->hb_lock); spin_unlock(&table->ht_lock); return (rc); } /* * tsd_hash_add_key - adds a destructor entry to the hash table * @table: hash table * @keyp: search key * @dtor: key destructor * * For every unique key there is a single entry in the hash which is used * as anchor. All other thread specific entries for this key are linked * to this anchor via the 'he_key_list' list head. On return they keyp * will be set to the next available key for the hash table. */ static int tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor) { tsd_hash_entry_t *tmp_entry, *entry; tsd_hash_bin_t *bin; ulong_t hash; int keys_checked = 0; ASSERT3P(table, !=, NULL); /* Allocate entry to be used as a destructor for this key */ entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE); if (entry == NULL) return (ENOMEM); /* Determine next available key value */ spin_lock(&table->ht_lock); do { /* Limited to TSD_KEYS_MAX concurrent unique keys */ if (table->ht_key++ > TSD_KEYS_MAX) table->ht_key = 1; /* Ensure failure when all TSD_KEYS_MAX keys are in use */ if (keys_checked++ >= TSD_KEYS_MAX) { spin_unlock(&table->ht_lock); return (ENOENT); } tmp_entry = tsd_hash_search(table, table->ht_key, DTOR_PID); } while (tmp_entry); /* Add destructor entry in to hash table */ entry->he_key = *keyp = table->ht_key; entry->he_pid = DTOR_PID; entry->he_dtor = dtor; entry->he_value = NULL; INIT_HLIST_NODE(&entry->he_list); INIT_LIST_HEAD(&entry->he_key_list); INIT_LIST_HEAD(&entry->he_pid_list); hash = hash_long((ulong_t)*keyp * (ulong_t)DTOR_PID, table->ht_bits); bin = &table->ht_bins[hash]; spin_lock(&bin->hb_lock); hlist_add_head(&entry->he_list, &bin->hb_head); spin_unlock(&bin->hb_lock); spin_unlock(&table->ht_lock); return (0); } /* * tsd_hash_add_pid - adds a process entry to the hash table * @table: hash table * @pid: search pid * * For every process there is a single entry in the hash which is used * as anchor. All other thread specific entries for this process are * linked to this anchor via the 'he_pid_list' list head. 
*/ static int tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid) { tsd_hash_entry_t *entry; tsd_hash_bin_t *bin; ulong_t hash; /* Allocate entry to be used as the process reference */ entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE); if (entry == NULL) return (ENOMEM); spin_lock(&table->ht_lock); entry->he_key = PID_KEY; entry->he_pid = pid; entry->he_dtor = NULL; entry->he_value = NULL; INIT_HLIST_NODE(&entry->he_list); INIT_LIST_HEAD(&entry->he_key_list); INIT_LIST_HEAD(&entry->he_pid_list); hash = hash_long((ulong_t)PID_KEY * (ulong_t)pid, table->ht_bits); bin = &table->ht_bins[hash]; spin_lock(&bin->hb_lock); hlist_add_head(&entry->he_list, &bin->hb_head); spin_unlock(&bin->hb_lock); spin_unlock(&table->ht_lock); return (0); } /* * tsd_hash_del - delete an entry from hash table, key, and pid lists * @table: hash table * @key: search key * @pid: search pid */ static void tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry) { hlist_del(&entry->he_list); list_del_init(&entry->he_key_list); list_del_init(&entry->he_pid_list); } /* * tsd_hash_table_init - allocate a hash table * @bits: hash table size * * A hash table with 2^bits bins will be created, it may not be resized * after the fact and must be free'd with tsd_hash_table_fini(). */ static tsd_hash_table_t * tsd_hash_table_init(uint_t bits) { tsd_hash_table_t *table; int hash, size = (1 << bits); table = kmem_zalloc(sizeof (tsd_hash_table_t), KM_SLEEP); if (table == NULL) return (NULL); table->ht_bins = kmem_zalloc(sizeof (tsd_hash_bin_t) * size, KM_SLEEP); if (table->ht_bins == NULL) { kmem_free(table, sizeof (tsd_hash_table_t)); return (NULL); } for (hash = 0; hash < size; hash++) { spin_lock_init(&table->ht_bins[hash].hb_lock); INIT_HLIST_HEAD(&table->ht_bins[hash].hb_head); } spin_lock_init(&table->ht_lock); table->ht_bits = bits; table->ht_key = 1; return (table); } /* * tsd_hash_table_fini - free a hash table * @table: hash table * * Free a hash table allocated by tsd_hash_table_init(). If the hash * table is not empty this function will call the proper destructor for * all remaining entries before freeing the memory used by those entries. */ static void tsd_hash_table_fini(tsd_hash_table_t *table) { HLIST_HEAD(work); tsd_hash_bin_t *bin; tsd_hash_entry_t *entry; int size, i; ASSERT3P(table, !=, NULL); spin_lock(&table->ht_lock); for (i = 0, size = (1 << table->ht_bits); i < size; i++) { bin = &table->ht_bins[i]; spin_lock(&bin->hb_lock); while (!hlist_empty(&bin->hb_head)) { entry = hlist_entry(bin->hb_head.first, tsd_hash_entry_t, he_list); tsd_hash_del(table, entry); hlist_add_head(&entry->he_list, &work); } spin_unlock(&bin->hb_lock); } spin_unlock(&table->ht_lock); tsd_hash_dtor(&work); kmem_free(table->ht_bins, sizeof (tsd_hash_bin_t)*(1<ht_bits)); kmem_free(table, sizeof (tsd_hash_table_t)); } /* * tsd_remove_entry - remove a tsd entry for this thread * @entry: entry to remove * * Remove the thread specific data @entry for this thread. * If this is the last entry for this thread, also remove the PID entry. 
*/ static void tsd_remove_entry(tsd_hash_entry_t *entry) { HLIST_HEAD(work); tsd_hash_table_t *table; tsd_hash_entry_t *pid_entry; tsd_hash_bin_t *pid_entry_bin, *entry_bin; ulong_t hash; table = tsd_hash_table; ASSERT3P(table, !=, NULL); ASSERT3P(entry, !=, NULL); spin_lock(&table->ht_lock); hash = hash_long((ulong_t)entry->he_key * (ulong_t)entry->he_pid, table->ht_bits); entry_bin = &table->ht_bins[hash]; /* save the possible pid_entry */ pid_entry = list_entry(entry->he_pid_list.next, tsd_hash_entry_t, he_pid_list); /* remove entry */ spin_lock(&entry_bin->hb_lock); tsd_hash_del(table, entry); hlist_add_head(&entry->he_list, &work); spin_unlock(&entry_bin->hb_lock); /* if pid_entry is indeed pid_entry, then remove it if it's empty */ if (pid_entry->he_key == PID_KEY && list_empty(&pid_entry->he_pid_list)) { hash = hash_long((ulong_t)pid_entry->he_key * (ulong_t)pid_entry->he_pid, table->ht_bits); pid_entry_bin = &table->ht_bins[hash]; spin_lock(&pid_entry_bin->hb_lock); tsd_hash_del(table, pid_entry); hlist_add_head(&pid_entry->he_list, &work); spin_unlock(&pid_entry_bin->hb_lock); } spin_unlock(&table->ht_lock); tsd_hash_dtor(&work); } /* * tsd_set - set thread specific data * @key: lookup key * @value: value to set * * Caller must prevent racing tsd_create() or tsd_destroy(), protected * from racing tsd_get() or tsd_set() because it is thread specific. * This function has been optimized to be fast for the update case. * When setting the tsd initially it will be slower due to additional * required locking and potential memory allocations. */ int tsd_set(uint_t key, void *value) { tsd_hash_table_t *table; tsd_hash_entry_t *entry; pid_t pid; int rc; /* mark remove if value is NULL */ boolean_t remove = (value == NULL); table = tsd_hash_table; pid = curthread->pid; ASSERT3P(table, !=, NULL); if ((key == 0) || (key > TSD_KEYS_MAX)) return (EINVAL); /* Entry already exists in hash table update value */ entry = tsd_hash_search(table, key, pid); if (entry) { entry->he_value = value; /* remove the entry */ if (remove) tsd_remove_entry(entry); return (0); } /* don't create entry if value is NULL */ if (remove) return (0); /* Add a process entry to the hash if not yet exists */ entry = tsd_hash_search(table, PID_KEY, pid); if (entry == NULL) { rc = tsd_hash_add_pid(table, pid); if (rc) return (rc); } rc = tsd_hash_add(table, key, pid, value); return (rc); } EXPORT_SYMBOL(tsd_set); /* * tsd_get - get thread specific data * @key: lookup key * * Caller must prevent racing tsd_create() or tsd_destroy(). This * implementation is designed to be fast and scalable, it does not * lock the entire table only a single hash bin. */ void * tsd_get(uint_t key) { tsd_hash_entry_t *entry; ASSERT3P(tsd_hash_table, !=, NULL); if ((key == 0) || (key > TSD_KEYS_MAX)) return (NULL); entry = tsd_hash_search(tsd_hash_table, key, curthread->pid); if (entry == NULL) return (NULL); return (entry->he_value); } EXPORT_SYMBOL(tsd_get); /* * tsd_get_by_thread - get thread specific data for specified thread * @key: lookup key * @thread: thread to lookup * * Caller must prevent racing tsd_create() or tsd_destroy(). This * implementation is designed to be fast and scalable, it does not * lock the entire table only a single hash bin. 
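 *
 * For example, taskq_member() in spl-taskq.c (earlier in this change)
 * uses it to test whether a thread belongs to a given taskq:
 *
 *	tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t)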
*/ void * tsd_get_by_thread(uint_t key, kthread_t *thread) { tsd_hash_entry_t *entry; ASSERT3P(tsd_hash_table, !=, NULL); if ((key == 0) || (key > TSD_KEYS_MAX)) return (NULL); entry = tsd_hash_search(tsd_hash_table, key, thread->pid); if (entry == NULL) return (NULL); return (entry->he_value); } EXPORT_SYMBOL(tsd_get_by_thread); /* * tsd_create - create thread specific data key * @keyp: lookup key address * @dtor: destructor called during tsd_destroy() or tsd_exit() * * Provided key must be set to 0 or it assumed to be already in use. * The dtor is allowed to be NULL in which case no additional cleanup * for the data is performed during tsd_destroy() or tsd_exit(). * * Caller must prevent racing tsd_set() or tsd_get(), this function is * safe from racing tsd_create(), tsd_destroy(), and tsd_exit(). */ void tsd_create(uint_t *keyp, dtor_func_t dtor) { ASSERT3P(keyp, !=, NULL); if (*keyp) return; (void) tsd_hash_add_key(tsd_hash_table, keyp, dtor); } EXPORT_SYMBOL(tsd_create); /* * tsd_destroy - destroy thread specific data * @keyp: lookup key address * * Destroys the thread specific data on all threads which use this key. * * Caller must prevent racing tsd_set() or tsd_get(), this function is * safe from racing tsd_create(), tsd_destroy(), and tsd_exit(). */ void tsd_destroy(uint_t *keyp) { HLIST_HEAD(work); tsd_hash_table_t *table; tsd_hash_entry_t *dtor_entry, *entry; tsd_hash_bin_t *dtor_entry_bin, *entry_bin; ulong_t hash; table = tsd_hash_table; ASSERT3P(table, !=, NULL); spin_lock(&table->ht_lock); dtor_entry = tsd_hash_search(table, *keyp, DTOR_PID); if (dtor_entry == NULL) { spin_unlock(&table->ht_lock); return; } /* * All threads which use this key must be linked off of the * DTOR_PID entry. They are removed from the hash table and * linked in to a private working list to be destroyed. */ while (!list_empty(&dtor_entry->he_key_list)) { entry = list_entry(dtor_entry->he_key_list.next, tsd_hash_entry_t, he_key_list); ASSERT3U(dtor_entry->he_key, ==, entry->he_key); ASSERT3P(dtor_entry->he_dtor, ==, entry->he_dtor); hash = hash_long((ulong_t)entry->he_key * (ulong_t)entry->he_pid, table->ht_bits); entry_bin = &table->ht_bins[hash]; spin_lock(&entry_bin->hb_lock); tsd_hash_del(table, entry); hlist_add_head(&entry->he_list, &work); spin_unlock(&entry_bin->hb_lock); } hash = hash_long((ulong_t)dtor_entry->he_key * (ulong_t)dtor_entry->he_pid, table->ht_bits); dtor_entry_bin = &table->ht_bins[hash]; spin_lock(&dtor_entry_bin->hb_lock); tsd_hash_del(table, dtor_entry); hlist_add_head(&dtor_entry->he_list, &work); spin_unlock(&dtor_entry_bin->hb_lock); spin_unlock(&table->ht_lock); tsd_hash_dtor(&work); *keyp = 0; } EXPORT_SYMBOL(tsd_destroy); /* * tsd_exit - destroys all thread specific data for this thread * * Destroys all the thread specific data for this thread. * * Caller must prevent racing tsd_set() or tsd_get(), this function is * safe from racing tsd_create(), tsd_destroy(), and tsd_exit(). */ void tsd_exit(void) { HLIST_HEAD(work); tsd_hash_table_t *table; tsd_hash_entry_t *pid_entry, *entry; tsd_hash_bin_t *pid_entry_bin, *entry_bin; ulong_t hash; table = tsd_hash_table; ASSERT3P(table, !=, NULL); spin_lock(&table->ht_lock); pid_entry = tsd_hash_search(table, PID_KEY, curthread->pid); if (pid_entry == NULL) { spin_unlock(&table->ht_lock); return; } /* * All keys associated with this pid must be linked off of the * PID_KEY entry. They are removed from the hash table and * linked in to a private working list to be destroyed. 
*/ while (!list_empty(&pid_entry->he_pid_list)) { entry = list_entry(pid_entry->he_pid_list.next, tsd_hash_entry_t, he_pid_list); ASSERT3U(pid_entry->he_pid, ==, entry->he_pid); hash = hash_long((ulong_t)entry->he_key * (ulong_t)entry->he_pid, table->ht_bits); entry_bin = &table->ht_bins[hash]; spin_lock(&entry_bin->hb_lock); tsd_hash_del(table, entry); hlist_add_head(&entry->he_list, &work); spin_unlock(&entry_bin->hb_lock); } hash = hash_long((ulong_t)pid_entry->he_key * (ulong_t)pid_entry->he_pid, table->ht_bits); pid_entry_bin = &table->ht_bins[hash]; spin_lock(&pid_entry_bin->hb_lock); tsd_hash_del(table, pid_entry); hlist_add_head(&pid_entry->he_list, &work); spin_unlock(&pid_entry_bin->hb_lock); spin_unlock(&table->ht_lock); tsd_hash_dtor(&work); } EXPORT_SYMBOL(tsd_exit); int spl_tsd_init(void) { tsd_hash_table = tsd_hash_table_init(TSD_HASH_TABLE_BITS_DEFAULT); if (tsd_hash_table == NULL) - return (1); + return (-ENOMEM); return (0); } void spl_tsd_fini(void) { tsd_hash_table_fini(tsd_hash_table); tsd_hash_table = NULL; } diff --git a/module/os/linux/spl/spl-zlib.c b/module/os/linux/spl/spl-zlib.c index 589496da0c78..8c6282ee5d16 100644 --- a/module/os/linux/spl/spl-zlib.c +++ b/module/os/linux/spl/spl-zlib.c @@ -1,217 +1,217 @@ /* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * UCRL-CODE-235197 * * This file is part of the SPL, Solaris Porting Layer. * * The SPL is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The SPL is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . * * * z_compress_level/z_uncompress are nearly identical copies of the * compress2/uncompress functions provided by the official zlib package * available at http://zlib.net/. The only changes made we to slightly * adapt the functions called to match the linux kernel implementation * of zlib. The full zlib license follows: * * zlib.h -- interface of the 'zlib' general purpose compression library * version 1.2.5, April 19th, 2010 * * Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. 
 *
 * Jean-loup Gailly
 * Mark Adler
 */

#include
#include
#include
#include

static spl_kmem_cache_t *zlib_workspace_cache;

/*
 * A kmem_cache is used for the zlib workspaces to avoid having to vmalloc
 * and vfree for every call. Using a kmem_cache also has the advantage
 * that it improves the odds that the memory used will be local to this cpu.
 * To further improve things it might be wise to create a dedicated per-cpu
 * workspace for use. This would take some additional care because we then
 * must disable preemption around the critical section, and verify that
 * zlib_deflate* and zlib_inflate* never internally call schedule().
 */
static void *
zlib_workspace_alloc(int flags)
{
        return (kmem_cache_alloc(zlib_workspace_cache, flags & ~(__GFP_FS)));
}

static void
zlib_workspace_free(void *workspace)
{
        kmem_cache_free(zlib_workspace_cache, workspace);
}

/*
 * Compresses the source buffer into the destination buffer. The level
 * parameter has the same meaning as in deflateInit. sourceLen is the byte
 * length of the source buffer. Upon entry, destLen is the total size of the
 * destination buffer, which must be at least 0.1% larger than sourceLen plus
 * 12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
 *
 * compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
 * memory, Z_BUF_ERROR if there was not enough room in the output buffer,
 * Z_STREAM_ERROR if the level parameter is invalid.
 */
int
z_compress_level(void *dest, size_t *destLen, const void *source,
    size_t sourceLen, int level)
{
        z_stream stream;
        int err;

        stream.next_in = (Byte *)source;
        stream.avail_in = (uInt)sourceLen;
        stream.next_out = dest;
        stream.avail_out = (uInt)*destLen;

        if ((size_t)stream.avail_out != *destLen)
                return (Z_BUF_ERROR);

        stream.workspace = zlib_workspace_alloc(KM_SLEEP);
        if (!stream.workspace)
                return (Z_MEM_ERROR);

        err = zlib_deflateInit(&stream, level);
        if (err != Z_OK) {
                zlib_workspace_free(stream.workspace);
                return (err);
        }

        err = zlib_deflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END) {
                zlib_deflateEnd(&stream);
                zlib_workspace_free(stream.workspace);
                return (err == Z_OK ? Z_BUF_ERROR : err);
        }

        *destLen = stream.total_out;
        err = zlib_deflateEnd(&stream);
        zlib_workspace_free(stream.workspace);

        return (err);
}
EXPORT_SYMBOL(z_compress_level);

/*
 * Decompresses the source buffer into the destination buffer. sourceLen is
 * the byte length of the source buffer. Upon entry, destLen is the total
 * size of the destination buffer, which must be large enough to hold the
 * entire uncompressed data. (The size of the uncompressed data must have
 * been saved previously by the compressor and transmitted to the decompressor
 * by some mechanism outside the scope of this compression library.)
 * Upon exit, destLen is the actual size of the uncompressed buffer.
 * This function can be used to decompress a whole file at once if the
 * input file is mmap'ed.
 *
 * uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
 * enough memory, Z_BUF_ERROR if there was not enough room in the output
 * buffer, or Z_DATA_ERROR if the input data was corrupted.
 */
int
z_uncompress(void *dest, size_t *destLen, const void *source, size_t sourceLen)
{
        z_stream stream;
        int err;

        stream.next_in = (Byte *)source;
        stream.avail_in = (uInt)sourceLen;
        stream.next_out = dest;
        stream.avail_out = (uInt)*destLen;

        if ((size_t)stream.avail_out != *destLen)
                return (Z_BUF_ERROR);

        stream.workspace = zlib_workspace_alloc(KM_SLEEP);
        if (!stream.workspace)
                return (Z_MEM_ERROR);

        err = zlib_inflateInit(&stream);
        if (err != Z_OK) {
                zlib_workspace_free(stream.workspace);
                return (err);
        }

        err = zlib_inflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END) {
                zlib_inflateEnd(&stream);
                zlib_workspace_free(stream.workspace);

                if (err == Z_NEED_DICT ||
                    (err == Z_BUF_ERROR && stream.avail_in == 0))
                        return (Z_DATA_ERROR);

                return (err);
        }

        *destLen = stream.total_out;
        err = zlib_inflateEnd(&stream);
        zlib_workspace_free(stream.workspace);

        return (err);
}
EXPORT_SYMBOL(z_uncompress);

int
spl_zlib_init(void)
{
        int size;

        size = MAX(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
            zlib_inflate_workspacesize());

        zlib_workspace_cache = kmem_cache_create(
            "spl_zlib_workspace_cache",
            size, 0, NULL, NULL, NULL, NULL, NULL,
            KMC_KVMEM);
        if (!zlib_workspace_cache)
-               return (1);
+               return (-ENOMEM);

        return (0);
}

void
spl_zlib_fini(void)
{
        kmem_cache_destroy(zlib_workspace_cache);
        zlib_workspace_cache = NULL;
}
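Usage note (illustrative, not part of the patch): the sketch below shows how a kernel-side caller might round-trip a buffer through the z_compress_level()/z_uncompress() wrappers documented above. The function name example_zlib_round_trip, the choice of Z_BEST_SPEED, and the kmem_alloc()/kmem_free() buffer management are assumptions made for the example; the output-buffer sizing follows the "0.1% larger plus 12 bytes" rule quoted in the z_compress_level() comment, and the caller is assumed to be in a context where KM_SLEEP allocations are permitted.

/*
 * Illustrative sketch only -- not code from this diff.  Assumes the same
 * headers as spl-zlib.c plus <sys/kmem.h> for kmem_alloc()/kmem_free().
 */
static int
example_zlib_round_trip(const void *src, size_t src_len)
{
        size_t c_alloc = src_len + (src_len / 1000) + 12; /* compress2 sizing rule */
        size_t c_len = c_alloc;
        size_t u_len = src_len;
        void *c_buf, *u_buf;
        int err;

        c_buf = kmem_alloc(c_alloc, KM_SLEEP);
        u_buf = kmem_alloc(src_len, KM_SLEEP);

        /* On success, c_len is updated to the actual compressed size. */
        err = z_compress_level(c_buf, &c_len, src, src_len, Z_BEST_SPEED);
        if (err == Z_OK) {
                /* u_len must already be large enough for the original data. */
                err = z_uncompress(u_buf, &u_len, c_buf, c_len);
                if (err == Z_OK)
                        ASSERT3U(u_len, ==, src_len);
        }

        kmem_free(u_buf, src_len);
        kmem_free(c_buf, c_alloc);

        return (err == Z_OK ? 0 : -EIO);
}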