Index: head/sys/contrib/ck/FREEBSD-Xlist =================================================================== --- head/sys/contrib/ck/FREEBSD-Xlist (revision 331897) +++ head/sys/contrib/ck/FREEBSD-Xlist (revision 331898) @@ -1,11 +1,12 @@ #$FreeBSD$ */LICENSE */Makefile.in */README */build */configure */doc */regressions */tools */include/ck_md.h.in +*/include/freebsd/ck_md.h.in */src/Makefile.in Index: head/sys/contrib/ck/include/ck_cc.h =================================================================== --- head/sys/contrib/ck/include/ck_cc.h (revision 331897) +++ head/sys/contrib/ck/include/ck_cc.h (revision 331898) @@ -1,180 +1,173 @@ /* * Copyright 2009-2015 Samy Al Bahra. * Copyright 2014 Paul Khuong. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_CC_H #define CK_CC_H #if defined(__GNUC__) || defined(__SUNPRO_C) #include "gcc/ck_cc.h" #endif #ifndef CK_CC_RESTRICT #define CK_CC_RESTRICT #endif #ifndef CK_CC_INLINE #define CK_CC_INLINE inline #endif #ifndef CK_CC_FORCE_INLINE #define CK_CC_FORCE_INLINE inline #endif #define CK_CC_DECONST_PTR(X) ((void *)(uintptr_t)(X)) /* * Container function. * This relies on (compiler) implementation-defined behavior. 
*/ #define CK_CC_CONTAINER(F, T, M, N) \ CK_CC_INLINE static T * \ N(F *p) \ { \ F *n = p; \ return (T *)(void *)(((char *)n) - ((size_t)&((T *)0)->M)); \ } #define CK_CC_PAD(x) union { char pad[x]; } #ifndef CK_CC_ALIASED #define CK_CC_ALIASED #endif #ifndef CK_CC_UNUSED #define CK_CC_UNUSED #endif #ifndef CK_CC_USED #define CK_CC_USED #endif #ifndef CK_CC_IMM #define CK_CC_IMM #endif #ifndef CK_CC_PACKED #define CK_CC_PACKED #endif #ifndef CK_CC_WEAKREF #define CK_CC_WEAKREF #endif #ifndef CK_CC_ALIGN #define CK_CC_ALIGN(X) #endif #ifndef CK_CC_CACHELINE #define CK_CC_CACHELINE #endif #ifndef CK_CC_LIKELY #define CK_CC_LIKELY(x) x #endif #ifndef CK_CC_UNLIKELY #define CK_CC_UNLIKELY(x) x #endif #ifndef CK_CC_TYPEOF #define CK_CC_TYPEOF(X, DEFAULT) (DEFAULT) #endif +#define CK_F_CC_FFS_G(L, T) \ +CK_CC_INLINE static int \ +ck_cc_##L(T v) \ +{ \ + unsigned int i; \ + \ + if (v == 0) \ + return 0; \ + \ + for (i = 1; (v & 1) == 0; i++, v >>= 1); \ + return i; \ +} + #ifndef CK_F_CC_FFS #define CK_F_CC_FFS -CK_CC_INLINE static int -ck_cc_ffs(unsigned int x) -{ - unsigned int i; +CK_F_CC_FFS_G(ffs, unsigned int) +#endif /* CK_F_CC_FFS */ - if (x == 0) - return 0; +#ifndef CK_F_CC_FFSL +#define CK_F_CC_FFSL +CK_F_CC_FFS_G(ffsl, unsigned long) +#endif /* CK_F_CC_FFSL */ - for (i = 1; (x & 1) == 0; i++, x >>= 1); +#ifndef CK_F_CC_FFSLL +#define CK_F_CC_FFSLL +CK_F_CC_FFS_G(ffsll, unsigned long long) +#endif /* CK_F_CC_FFSLL */ - return i; -} -#endif +#undef CK_F_CC_FFS_G -#ifndef CK_F_CC_CLZ -#define CK_F_CC_CLZ -#include - -CK_CC_INLINE static int -ck_cc_clz(unsigned int x) -{ - unsigned int count, i; - - for (count = 0, i = sizeof(unsigned int) * CHAR_BIT; i > 0; count++) { - unsigned int bit = 1U << --i; - - if (x & bit) - break; - } - - return count; -} -#endif - #ifndef CK_F_CC_CTZ #define CK_F_CC_CTZ CK_CC_INLINE static int ck_cc_ctz(unsigned int x) { unsigned int i; if (x == 0) return 0; for (i = 0; (x & 1) == 0; i++, x >>= 1); - return i; } #endif #ifndef CK_F_CC_POPCOUNT #define CK_F_CC_POPCOUNT CK_CC_INLINE static int ck_cc_popcount(unsigned int x) { unsigned int acc; for (acc = 0; x != 0; x >>= 1) acc += x & 1; return acc; } #endif #ifdef __cplusplus #define CK_CPP_CAST(type, arg) static_cast(arg) #else #define CK_CPP_CAST(type, arg) arg #endif #endif /* CK_CC_H */ Index: head/sys/contrib/ck/include/ck_hs.h =================================================================== --- head/sys/contrib/ck/include/ck_hs.h (revision 331897) +++ head/sys/contrib/ck/include/ck_hs.h (revision 331898) @@ -1,134 +1,136 @@ /* * Copyright 2012-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
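/*
 * Illustrative sketch, not part of the diff: the CK_F_CC_FFS_G generator
 * above expands into ck_cc_ffs(), ck_cc_ffsl() and ck_cc_ffsll(), which
 * return the 1-based index of the least significant set bit, or 0 when the
 * argument is 0 (ffs(3) semantics).  The userspace-style checks below follow
 * directly from the loops in the generated functions and in ck_cc_ctz()/
 * ck_cc_popcount().
 */
#include <ck_cc.h>
#include <assert.h>

static void
ck_cc_ffs_example(void)
{
        assert(ck_cc_ffs(0U) == 0);             /* no bit set */
        assert(ck_cc_ffs(1U) == 1);             /* bit 0 -> index 1 */
        assert(ck_cc_ffs(0x80U) == 8);          /* bit 7 -> index 8 */
        assert(ck_cc_ffsll(1ULL << 40) == 41);  /* works past 32 bits */
        assert(ck_cc_ctz(0x8U) == 3);           /* trailing-zero count */
        assert(ck_cc_popcount(0xF0U) == 4);     /* set-bit count */
}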
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_HS_H #define CK_HS_H #include #include #include #include #include #include #include /* * Indicates a single-writer many-reader workload. Mutually * exclusive with CK_HS_MODE_MPMC */ #define CK_HS_MODE_SPMC 1 /* * Indicates that values to be stored are not pointers but * values. Allows for full precision. Mutually exclusive * with CK_HS_MODE_OBJECT. */ #define CK_HS_MODE_DIRECT 2 /* * Indicates that the values to be stored are pointers. * Allows for space optimizations in the presence of pointer * packing. Mutually exclusive with CK_HS_MODE_DIRECT. */ #define CK_HS_MODE_OBJECT 8 /* * Indicates a delete-heavy workload. This will reduce the * need for garbage collection at the cost of approximately * 12% to 20% increased memory usage. */ #define CK_HS_MODE_DELETE 16 /* Currently unsupported. */ #define CK_HS_MODE_MPMC (void) /* * Hash callback function. */ typedef unsigned long ck_hs_hash_cb_t(const void *, unsigned long); /* * Returns pointer to object if objects are equivalent. */ typedef bool ck_hs_compare_cb_t(const void *, const void *); #if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS) #define CK_HS_PP #define CK_HS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1) #endif struct ck_hs_map; struct ck_hs { struct ck_malloc *m; struct ck_hs_map *map; unsigned int mode; unsigned long seed; ck_hs_hash_cb_t *hf; ck_hs_compare_cb_t *compare; }; typedef struct ck_hs ck_hs_t; struct ck_hs_stat { unsigned long tombstones; unsigned long n_entries; unsigned int probe_maximum; }; struct ck_hs_iterator { void **cursor; unsigned long offset; + struct ck_hs_map *map; }; typedef struct ck_hs_iterator ck_hs_iterator_t; -#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 } +#define CK_HS_ITERATOR_INITIALIZER { NULL, 0, NULL } /* Convenience wrapper to table hash function. 
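/*
 * Illustrative sketch, not part of the diff: the iterator now carries its own
 * map pointer, which appears to be what lets the new ck_hs_next_spmc()
 * variant keep iterating safely while an SPMC writer resizes the set.  The
 * function and variable names below are hypothetical.
 */
#include <ck_hs.h>

static unsigned long
count_entries(ck_hs_t *hs)
{
        ck_hs_iterator_t it = CK_HS_ITERATOR_INITIALIZER;
        void *entry;
        unsigned long n = 0;

        /* ck_hs_iterator_init(&it) is the run-time equivalent of the initializer. */
        while (ck_hs_next_spmc(hs, &it, &entry) == true)
                n++;

        return n;
}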
*/ #define CK_HS_HASH(T, F, K) F((K), (T)->seed) typedef void *ck_hs_apply_fn_t(void *, void *); bool ck_hs_apply(ck_hs_t *, unsigned long, const void *, ck_hs_apply_fn_t *, void *); void ck_hs_iterator_init(ck_hs_iterator_t *); bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **); +bool ck_hs_next_spmc(ck_hs_t *, ck_hs_iterator_t *, void **); bool ck_hs_move(ck_hs_t *, ck_hs_t *, ck_hs_hash_cb_t *, ck_hs_compare_cb_t *, struct ck_malloc *); bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *, ck_hs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long); void ck_hs_destroy(ck_hs_t *); void *ck_hs_get(ck_hs_t *, unsigned long, const void *); bool ck_hs_put(ck_hs_t *, unsigned long, const void *); bool ck_hs_put_unique(ck_hs_t *, unsigned long, const void *); bool ck_hs_set(ck_hs_t *, unsigned long, const void *, void **); bool ck_hs_fas(ck_hs_t *, unsigned long, const void *, void **); void *ck_hs_remove(ck_hs_t *, unsigned long, const void *); bool ck_hs_grow(ck_hs_t *, unsigned long); bool ck_hs_rebuild(ck_hs_t *); bool ck_hs_gc(ck_hs_t *, unsigned long, unsigned long); unsigned long ck_hs_count(ck_hs_t *); bool ck_hs_reset(ck_hs_t *); bool ck_hs_reset_size(ck_hs_t *, unsigned long); void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *); #endif /* CK_HS_H */ Index: head/sys/contrib/ck/include/ck_md.h =================================================================== --- head/sys/contrib/ck/include/ck_md.h (revision 331897) +++ head/sys/contrib/ck/include/ck_md.h (revision 331898) @@ -1,80 +1,134 @@ /* - * Copyright 2011-2012 Samy Al Bahra. + * Copyright 2018 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD$ + * $FreeBSD: head/sys/contrib/ck/include/ck_md.h 329388 2018-02-16 17:50:06Z cog +net $ */ +/* + * This header file is meant for use of Concurrency Kit in the FreeBSD kernel. + */ + #ifndef CK_MD_H #define CK_MD_H +#include + +#ifndef _KERNEL +#error This header file is meant for the FreeBSD kernel. +#endif /* _KERNEL */ + #ifndef CK_MD_CACHELINE +/* + * FreeBSD's CACHE_LINE macro is a compile-time maximum cache-line size for an + * architecture, defined to be 128 bytes by default on x86*. Even in presence + * of adjacent sector prefetch, this doesn't make sense from a modeling + * perspective. 
+ */ +#if defined(__amd64__) || defined(__i386__) #define CK_MD_CACHELINE (64) -#endif +#else +#define CK_MD_CACHELINE (CACHE_LINE_SIZE) +#endif /* !__amd64__ && !__i386__ */ +#endif /* CK_MD_CACHELINE */ #ifndef CK_MD_PAGESIZE -#define CK_MD_PAGESIZE (4096) +#define CK_MD_PAGESIZE (PAGE_SIZE) #endif +/* + * Once FreeBSD has a mechanism to detect RTM, this can be enabled and RTM + * facilities can be called. These facilities refer to TSX. + */ #ifndef CK_MD_RTM_DISABLE #define CK_MD_RTM_DISABLE #endif /* CK_MD_RTM_DISABLE */ +/* + * Do not enable pointer-packing-related (VMA) optimizations in kernel-space. + */ #ifndef CK_MD_POINTER_PACK_DISABLE #define CK_MD_POINTER_PACK_DISABLE #endif /* CK_MD_POINTER_PACK_DISABLE */ -#ifndef CK_MD_VMA_BITS_UNKNOWN -#define CK_MD_VMA_BITS_UNKNOWN +/* + * The following would be used for pointer-packing tricks, disabled for the + * kernel. + */ +#ifndef CK_MD_VMA_BITS_UNKNOWN +#define CK_MD_VMA_BITS_UNKNOWN #endif /* CK_MD_VMA_BITS_UNKNOWN */ +/* + * Do not enable double operations in kernel-space. + */ #ifndef CK_PR_DISABLE_DOUBLE #define CK_PR_DISABLE_DOUBLE #endif /* CK_PR_DISABLE_DOUBLE */ -#define CK_VERSION "0.6.0" -#define CK_GIT_SHA "" +/* + * If building for a uni-processor target, then enable the uniprocessor + * feature flag. This, among other things, will remove the lock prefix. + */ +#ifndef SMP +#define CK_MD_UMP +#endif /* SMP */ /* + * Disable the use of compiler builtin functions. + */ +#define CK_MD_CC_BUILTIN_DISABLE 1 + +/* * CK expects those, which are normally defined by the build system. */ #if defined(__i386__) && !defined(__x86__) #define __x86__ +/* + * If x86 becomes more relevant, we may want to consider importing in + * __mbk() to avoid potential issues around false sharing. + */ #define CK_MD_TSO +#define CK_MD_SSE_DISABLE 1 #elif defined(__amd64__) #define CK_MD_TSO #elif defined(__sparc64__) && !defined(__sparcv9__) #define __sparcv9__ #define CK_MD_TSO #elif defined(__powerpc64__) && !defined(__ppc64__) #define __ppc64__ #elif defined(__powerpc__) && !defined(__ppc__) #define __ppc__ #endif +/* If no memory model has been defined, assume RMO. */ #if !defined(CK_MD_RMO) && !defined(CK_MD_TSO) && !defined(CK_MD_PSO) #define CK_MD_RMO #endif + +#define CK_VERSION "0.7.0" +#define CK_GIT_SHA "db5db44" #endif /* CK_MD_H */ Index: head/sys/contrib/ck/include/ck_pr.h =================================================================== --- head/sys/contrib/ck/include/ck_pr.h (revision 331897) +++ head/sys/contrib/ck/include/ck_pr.h (revision 331898) @@ -1,1223 +1,1225 @@ /* * Copyright 2009-2015 Samy Al Bahra. * Copyright 2011 David Joseph. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
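/*
 * Illustrative sketch, not part of the diff: CK_MD_CACHELINE is typically
 * used to pad hot fields onto separate cache lines; struct ck_ring later in
 * this commit uses exactly this pattern.  The structure below is a
 * hypothetical consumer-side example.
 */
#include <ck_md.h>

struct counters {
        unsigned int produced;
        /* Keep the two counters from sharing a cache line (false sharing). */
        char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
        unsigned int consumed;
};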
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_PR_H #define CK_PR_H #include #include #include #include #include #ifndef CK_USE_CC_BUILTINS #if defined(__x86_64__) #include "gcc/x86_64/ck_pr.h" #elif defined(__x86__) #include "gcc/x86/ck_pr.h" #elif defined(__sparcv9__) #include "gcc/sparcv9/ck_pr.h" #elif defined(__ppc64__) #include "gcc/ppc64/ck_pr.h" +#elif defined(__s390x__) +#include "gcc/s390x/ck_pr.h" #elif defined(__ppc__) #include "gcc/ppc/ck_pr.h" #elif defined(__arm__) #if __ARM_ARCH >= 6 #include "gcc/arm/ck_pr.h" #else #include "gcc/arm/ck_pr_armv4.h" #endif #elif defined(__aarch64__) #include "gcc/aarch64/ck_pr.h" #elif !defined(__GNUC__) #error Your platform is unsupported #endif #endif /* !CK_USE_CC_BUILTINS */ #if defined(__GNUC__) #include "gcc/ck_pr.h" #endif #define CK_PR_FENCE_EMIT(T) \ CK_CC_INLINE static void \ ck_pr_fence_##T(void) \ { \ ck_pr_fence_strict_##T(); \ return; \ } #define CK_PR_FENCE_NOOP(T) \ CK_CC_INLINE static void \ ck_pr_fence_##T(void) \ { \ ck_pr_barrier(); \ return; \ } /* * None of the currently supported platforms allow for data-dependent * load ordering. */ CK_PR_FENCE_NOOP(load_depends) #define ck_pr_fence_strict_load_depends ck_pr_fence_load_depends /* * In memory models where atomic operations do not have serializing * effects, atomic read-modify-write operations are modeled as stores. */ #if defined(CK_MD_RMO) /* * Only stores to the same location have a global * ordering. */ CK_PR_FENCE_EMIT(atomic) CK_PR_FENCE_EMIT(atomic_load) CK_PR_FENCE_EMIT(atomic_store) CK_PR_FENCE_EMIT(store_atomic) CK_PR_FENCE_EMIT(load_atomic) CK_PR_FENCE_EMIT(load_store) CK_PR_FENCE_EMIT(store_load) CK_PR_FENCE_EMIT(load) CK_PR_FENCE_EMIT(store) CK_PR_FENCE_EMIT(memory) CK_PR_FENCE_EMIT(acquire) CK_PR_FENCE_EMIT(release) CK_PR_FENCE_EMIT(acqrel) CK_PR_FENCE_EMIT(lock) CK_PR_FENCE_EMIT(unlock) #elif defined(CK_MD_PSO) /* * Anything can be re-ordered with respect to stores. * Otherwise, loads are executed in-order. */ CK_PR_FENCE_EMIT(atomic) CK_PR_FENCE_NOOP(atomic_load) CK_PR_FENCE_EMIT(atomic_store) CK_PR_FENCE_EMIT(store_atomic) CK_PR_FENCE_NOOP(load_atomic) CK_PR_FENCE_EMIT(load_store) CK_PR_FENCE_EMIT(store_load) CK_PR_FENCE_NOOP(load) CK_PR_FENCE_EMIT(store) CK_PR_FENCE_EMIT(memory) CK_PR_FENCE_EMIT(acquire) CK_PR_FENCE_EMIT(release) CK_PR_FENCE_EMIT(acqrel) CK_PR_FENCE_EMIT(lock) CK_PR_FENCE_EMIT(unlock) #elif defined(CK_MD_TSO) /* * Only loads are re-ordered and only with respect to * prior stores. Atomic operations are serializing. */ CK_PR_FENCE_NOOP(atomic) CK_PR_FENCE_NOOP(atomic_load) CK_PR_FENCE_NOOP(atomic_store) CK_PR_FENCE_NOOP(store_atomic) CK_PR_FENCE_NOOP(load_atomic) CK_PR_FENCE_NOOP(load_store) CK_PR_FENCE_EMIT(store_load) CK_PR_FENCE_NOOP(load) CK_PR_FENCE_NOOP(store) CK_PR_FENCE_EMIT(memory) CK_PR_FENCE_NOOP(acquire) CK_PR_FENCE_NOOP(release) CK_PR_FENCE_NOOP(acqrel) CK_PR_FENCE_NOOP(lock) CK_PR_FENCE_NOOP(unlock) #else #error "No memory model has been defined." 
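/*
 * Illustrative sketch, not part of the diff: the macros above map each
 * ck_pr_fence_*() call to either a real barrier (EMIT) or a compiler barrier
 * (NOOP) depending on the memory model.  The store->load ordering shown below
 * is the one case that remains a real fence even on TSO targets.  Variable
 * and function names are hypothetical.
 */
#include <ck_pr.h>
#include <stdbool.h>

static int flag_self, flag_other;

static bool
try_enter_critical_section(void)
{
        ck_pr_store_int(&flag_self, 1);
        ck_pr_fence_store_load();       /* order the store before the load below */
        return ck_pr_load_int(&flag_other) == 0;
}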
#endif /* CK_MD_TSO */ #undef CK_PR_FENCE_EMIT #undef CK_PR_FENCE_NOOP #ifndef CK_F_PR_RFO #define CK_F_PR_RFO CK_CC_INLINE static void ck_pr_rfo(const void *m) { (void)m; return; } #endif /* CK_F_PR_RFO */ #define CK_PR_STORE_SAFE(DST, VAL, TYPE) \ ck_pr_md_store_##TYPE( \ ((void)sizeof(*(DST) = (VAL)), (DST)), \ (VAL)) #define ck_pr_store_ptr(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), ptr) #define ck_pr_store_char(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), char) #ifndef CK_PR_DISABLE_DOUBLE #define ck_pr_store_double(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), double) #endif #define ck_pr_store_uint(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), uint) #define ck_pr_store_int(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), int) #define ck_pr_store_32(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 32) #define ck_pr_store_16(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 16) #define ck_pr_store_8(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 8) #define ck_pr_store_ptr_unsafe(DST, VAL) ck_pr_md_store_ptr((DST), (VAL)) #ifdef CK_F_PR_LOAD_64 #define ck_pr_store_64(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 64) #endif /* CK_F_PR_LOAD_64 */ #define CK_PR_LOAD_PTR_SAFE(SRC) (CK_CC_TYPEOF(*(SRC), (void *)))ck_pr_md_load_ptr((SRC)) #define ck_pr_load_ptr(SRC) CK_PR_LOAD_PTR_SAFE((SRC)) #define CK_PR_LOAD_SAFE(SRC, TYPE) ck_pr_md_load_##TYPE((SRC)) #define ck_pr_load_char(SRC) CK_PR_LOAD_SAFE((SRC), char) #ifndef CK_PR_DISABLE_DOUBLE #define ck_pr_load_double(SRC) CK_PR_LOAD_SAFE((SRC), double) #endif #define ck_pr_load_uint(SRC) CK_PR_LOAD_SAFE((SRC), uint) #define ck_pr_load_int(SRC) CK_PR_LOAD_SAFE((SRC), int) #define ck_pr_load_32(SRC) CK_PR_LOAD_SAFE((SRC), 32) #define ck_pr_load_16(SRC) CK_PR_LOAD_SAFE((SRC), 16) #define ck_pr_load_8(SRC) CK_PR_LOAD_SAFE((SRC), 8) #ifdef CK_F_PR_LOAD_64 #define ck_pr_load_64(SRC) CK_PR_LOAD_SAFE((SRC), 64) #endif /* CK_F_PR_LOAD_64 */ #define CK_PR_BIN(K, S, M, T, P, C) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target, T value) \ { \ T previous; \ C punt; \ punt = ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, \ (C)previous, \ (C)(previous P value), \ &previous) == false) \ ck_pr_stall(); \ \ return; \ } #define CK_PR_BIN_S(K, S, T, P) CK_PR_BIN(K, S, T, T, P, T) #if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE) #ifndef CK_F_PR_ADD_CHAR #define CK_F_PR_ADD_CHAR CK_PR_BIN_S(add, char, char, +) #endif /* CK_F_PR_ADD_CHAR */ #ifndef CK_F_PR_SUB_CHAR #define CK_F_PR_SUB_CHAR CK_PR_BIN_S(sub, char, char, -) #endif /* CK_F_PR_SUB_CHAR */ #ifndef CK_F_PR_AND_CHAR #define CK_F_PR_AND_CHAR CK_PR_BIN_S(and, char, char, &) #endif /* CK_F_PR_AND_CHAR */ #ifndef CK_F_PR_XOR_CHAR #define CK_F_PR_XOR_CHAR CK_PR_BIN_S(xor, char, char, ^) #endif /* CK_F_PR_XOR_CHAR */ #ifndef CK_F_PR_OR_CHAR #define CK_F_PR_OR_CHAR CK_PR_BIN_S(or, char, char, |) #endif /* CK_F_PR_OR_CHAR */ #endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */ #if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE) #ifndef CK_F_PR_ADD_INT #define CK_F_PR_ADD_INT CK_PR_BIN_S(add, int, int, +) #endif /* CK_F_PR_ADD_INT */ #ifndef CK_F_PR_SUB_INT #define CK_F_PR_SUB_INT CK_PR_BIN_S(sub, int, int, -) #endif /* CK_F_PR_SUB_INT */ #ifndef CK_F_PR_AND_INT #define CK_F_PR_AND_INT CK_PR_BIN_S(and, int, int, &) #endif /* CK_F_PR_AND_INT */ #ifndef CK_F_PR_XOR_INT #define CK_F_PR_XOR_INT CK_PR_BIN_S(xor, int, int, ^) #endif /* CK_F_PR_XOR_INT */ #ifndef CK_F_PR_OR_INT #define CK_F_PR_OR_INT CK_PR_BIN_S(or, int, int, |) #endif /* CK_F_PR_OR_INT */ #endif /* CK_F_PR_LOAD_INT && 
CK_F_PR_CAS_INT_VALUE */ #if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \ !defined(CK_PR_DISABLE_DOUBLE) #ifndef CK_F_PR_ADD_DOUBLE #define CK_F_PR_ADD_DOUBLE CK_PR_BIN_S(add, double, double, +) #endif /* CK_F_PR_ADD_DOUBLE */ #ifndef CK_F_PR_SUB_DOUBLE #define CK_F_PR_SUB_DOUBLE CK_PR_BIN_S(sub, double, double, -) #endif /* CK_F_PR_SUB_DOUBLE */ #endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */ #if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE) #ifndef CK_F_PR_ADD_UINT #define CK_F_PR_ADD_UINT CK_PR_BIN_S(add, uint, unsigned int, +) #endif /* CK_F_PR_ADD_UINT */ #ifndef CK_F_PR_SUB_UINT #define CK_F_PR_SUB_UINT CK_PR_BIN_S(sub, uint, unsigned int, -) #endif /* CK_F_PR_SUB_UINT */ #ifndef CK_F_PR_AND_UINT #define CK_F_PR_AND_UINT CK_PR_BIN_S(and, uint, unsigned int, &) #endif /* CK_F_PR_AND_UINT */ #ifndef CK_F_PR_XOR_UINT #define CK_F_PR_XOR_UINT CK_PR_BIN_S(xor, uint, unsigned int, ^) #endif /* CK_F_PR_XOR_UINT */ #ifndef CK_F_PR_OR_UINT #define CK_F_PR_OR_UINT CK_PR_BIN_S(or, uint, unsigned int, |) #endif /* CK_F_PR_OR_UINT */ #endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */ #if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE) #ifndef CK_F_PR_ADD_PTR #define CK_F_PR_ADD_PTR CK_PR_BIN(add, ptr, void, uintptr_t, +, void *) #endif /* CK_F_PR_ADD_PTR */ #ifndef CK_F_PR_SUB_PTR #define CK_F_PR_SUB_PTR CK_PR_BIN(sub, ptr, void, uintptr_t, -, void *) #endif /* CK_F_PR_SUB_PTR */ #ifndef CK_F_PR_AND_PTR #define CK_F_PR_AND_PTR CK_PR_BIN(and, ptr, void, uintptr_t, &, void *) #endif /* CK_F_PR_AND_PTR */ #ifndef CK_F_PR_XOR_PTR #define CK_F_PR_XOR_PTR CK_PR_BIN(xor, ptr, void, uintptr_t, ^, void *) #endif /* CK_F_PR_XOR_PTR */ #ifndef CK_F_PR_OR_PTR #define CK_F_PR_OR_PTR CK_PR_BIN(or, ptr, void, uintptr_t, |, void *) #endif /* CK_F_PR_OR_PTR */ #endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */ #if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE) #ifndef CK_F_PR_ADD_64 #define CK_F_PR_ADD_64 CK_PR_BIN_S(add, 64, uint64_t, +) #endif /* CK_F_PR_ADD_64 */ #ifndef CK_F_PR_SUB_64 #define CK_F_PR_SUB_64 CK_PR_BIN_S(sub, 64, uint64_t, -) #endif /* CK_F_PR_SUB_64 */ #ifndef CK_F_PR_AND_64 #define CK_F_PR_AND_64 CK_PR_BIN_S(and, 64, uint64_t, &) #endif /* CK_F_PR_AND_64 */ #ifndef CK_F_PR_XOR_64 #define CK_F_PR_XOR_64 CK_PR_BIN_S(xor, 64, uint64_t, ^) #endif /* CK_F_PR_XOR_64 */ #ifndef CK_F_PR_OR_64 #define CK_F_PR_OR_64 CK_PR_BIN_S(or, 64, uint64_t, |) #endif /* CK_F_PR_OR_64 */ #endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */ #if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE) #ifndef CK_F_PR_ADD_32 #define CK_F_PR_ADD_32 CK_PR_BIN_S(add, 32, uint32_t, +) #endif /* CK_F_PR_ADD_32 */ #ifndef CK_F_PR_SUB_32 #define CK_F_PR_SUB_32 CK_PR_BIN_S(sub, 32, uint32_t, -) #endif /* CK_F_PR_SUB_32 */ #ifndef CK_F_PR_AND_32 #define CK_F_PR_AND_32 CK_PR_BIN_S(and, 32, uint32_t, &) #endif /* CK_F_PR_AND_32 */ #ifndef CK_F_PR_XOR_32 #define CK_F_PR_XOR_32 CK_PR_BIN_S(xor, 32, uint32_t, ^) #endif /* CK_F_PR_XOR_32 */ #ifndef CK_F_PR_OR_32 #define CK_F_PR_OR_32 CK_PR_BIN_S(or, 32, uint32_t, |) #endif /* CK_F_PR_OR_32 */ #endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */ #if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE) #ifndef CK_F_PR_ADD_16 #define CK_F_PR_ADD_16 CK_PR_BIN_S(add, 16, uint16_t, +) #endif /* CK_F_PR_ADD_16 */ #ifndef CK_F_PR_SUB_16 #define CK_F_PR_SUB_16 CK_PR_BIN_S(sub, 16, uint16_t, -) #endif /* CK_F_PR_SUB_16 */ #ifndef CK_F_PR_AND_16 #define CK_F_PR_AND_16 
CK_PR_BIN_S(and, 16, uint16_t, &) #endif /* CK_F_PR_AND_16 */ #ifndef CK_F_PR_XOR_16 #define CK_F_PR_XOR_16 CK_PR_BIN_S(xor, 16, uint16_t, ^) #endif /* CK_F_PR_XOR_16 */ #ifndef CK_F_PR_OR_16 #define CK_F_PR_OR_16 CK_PR_BIN_S(or, 16, uint16_t, |) #endif /* CK_F_PR_OR_16 */ #endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */ #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE) #ifndef CK_F_PR_ADD_8 #define CK_F_PR_ADD_8 CK_PR_BIN_S(add, 8, uint8_t, +) #endif /* CK_F_PR_ADD_8 */ #ifndef CK_F_PR_SUB_8 #define CK_F_PR_SUB_8 CK_PR_BIN_S(sub, 8, uint8_t, -) #endif /* CK_F_PR_SUB_8 */ #ifndef CK_F_PR_AND_8 #define CK_F_PR_AND_8 CK_PR_BIN_S(and, 8, uint8_t, &) #endif /* CK_F_PR_AND_8 */ #ifndef CK_F_PR_XOR_8 #define CK_F_PR_XOR_8 CK_PR_BIN_S(xor, 8, uint8_t, ^) #endif /* CK_F_PR_XOR_8 */ #ifndef CK_F_PR_OR_8 #define CK_F_PR_OR_8 CK_PR_BIN_S(or, 8, uint8_t, |) #endif /* CK_F_PR_OR_8 */ #endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */ #undef CK_PR_BIN_S #undef CK_PR_BIN #define CK_PR_BTX(K, S, M, T, P, C, R) \ CK_CC_INLINE static bool \ ck_pr_##K##_##S(M *target, unsigned int offset) \ { \ T previous; \ C punt; \ punt = ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, (C)previous, \ (C)(previous P (R ((T)1 << offset))), &previous) == false) \ ck_pr_stall(); \ return ((previous >> offset) & 1); \ } #define CK_PR_BTX_S(K, S, T, P, R) CK_PR_BTX(K, S, T, T, P, T, R) #if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE) #ifndef CK_F_PR_BTC_INT #define CK_F_PR_BTC_INT CK_PR_BTX_S(btc, int, int, ^,) #endif /* CK_F_PR_BTC_INT */ #ifndef CK_F_PR_BTR_INT #define CK_F_PR_BTR_INT CK_PR_BTX_S(btr, int, int, &, ~) #endif /* CK_F_PR_BTR_INT */ #ifndef CK_F_PR_BTS_INT #define CK_F_PR_BTS_INT CK_PR_BTX_S(bts, int, int, |,) #endif /* CK_F_PR_BTS_INT */ #endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */ #if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE) #ifndef CK_F_PR_BTC_UINT #define CK_F_PR_BTC_UINT CK_PR_BTX_S(btc, uint, unsigned int, ^,) #endif /* CK_F_PR_BTC_UINT */ #ifndef CK_F_PR_BTR_UINT #define CK_F_PR_BTR_UINT CK_PR_BTX_S(btr, uint, unsigned int, &, ~) #endif /* CK_F_PR_BTR_UINT */ #ifndef CK_F_PR_BTS_UINT #define CK_F_PR_BTS_UINT CK_PR_BTX_S(bts, uint, unsigned int, |,) #endif /* CK_F_PR_BTS_UINT */ #endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */ #if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE) #ifndef CK_F_PR_BTC_PTR #define CK_F_PR_BTC_PTR CK_PR_BTX(btc, ptr, void, uintptr_t, ^, void *,) #endif /* CK_F_PR_BTC_PTR */ #ifndef CK_F_PR_BTR_PTR #define CK_F_PR_BTR_PTR CK_PR_BTX(btr, ptr, void, uintptr_t, &, void *, ~) #endif /* CK_F_PR_BTR_PTR */ #ifndef CK_F_PR_BTS_PTR #define CK_F_PR_BTS_PTR CK_PR_BTX(bts, ptr, void, uintptr_t, |, void *,) #endif /* CK_F_PR_BTS_PTR */ #endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */ #if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE) #ifndef CK_F_PR_BTC_64 #define CK_F_PR_BTC_64 CK_PR_BTX_S(btc, 64, uint64_t, ^,) #endif /* CK_F_PR_BTC_64 */ #ifndef CK_F_PR_BTR_64 #define CK_F_PR_BTR_64 CK_PR_BTX_S(btr, 64, uint64_t, &, ~) #endif /* CK_F_PR_BTR_64 */ #ifndef CK_F_PR_BTS_64 #define CK_F_PR_BTS_64 CK_PR_BTX_S(bts, 64, uint64_t, |,) #endif /* CK_F_PR_BTS_64 */ #endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */ #if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE) #ifndef CK_F_PR_BTC_32 #define CK_F_PR_BTC_32 CK_PR_BTX_S(btc, 32, uint32_t, ^,) #endif /* CK_F_PR_BTC_32 */ #ifndef CK_F_PR_BTR_32 #define CK_F_PR_BTR_32 CK_PR_BTX_S(btr, 32, uint32_t, &, ~) 
#endif /* CK_F_PR_BTR_32 */ #ifndef CK_F_PR_BTS_32 #define CK_F_PR_BTS_32 CK_PR_BTX_S(bts, 32, uint32_t, |,) #endif /* CK_F_PR_BTS_32 */ #endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */ #if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE) #ifndef CK_F_PR_BTC_16 #define CK_F_PR_BTC_16 CK_PR_BTX_S(btc, 16, uint16_t, ^,) #endif /* CK_F_PR_BTC_16 */ #ifndef CK_F_PR_BTR_16 #define CK_F_PR_BTR_16 CK_PR_BTX_S(btr, 16, uint16_t, &, ~) #endif /* CK_F_PR_BTR_16 */ #ifndef CK_F_PR_BTS_16 #define CK_F_PR_BTS_16 CK_PR_BTX_S(bts, 16, uint16_t, |,) #endif /* CK_F_PR_BTS_16 */ #endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */ #undef CK_PR_BTX_S #undef CK_PR_BTX #define CK_PR_UNARY(K, X, S, M, T) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target) \ { \ ck_pr_##X##_##S(target, (T)1); \ return; \ } #define CK_PR_UNARY_Z(K, S, M, T, P, C, Z) \ CK_CC_INLINE static void \ ck_pr_##K##_##S##_zero(M *target, bool *zero) \ { \ T previous; \ C punt; \ punt = (C)ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, \ (C)previous, \ (C)(previous P 1), \ &previous) == false) \ ck_pr_stall(); \ *zero = previous == (T)Z; \ return; \ } #define CK_PR_UNARY_S(K, X, S, M) CK_PR_UNARY(K, X, S, M, M) #define CK_PR_UNARY_Z_S(K, S, M, P, Z) CK_PR_UNARY_Z(K, S, M, M, P, M, Z) #if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE) #ifndef CK_F_PR_INC_CHAR #define CK_F_PR_INC_CHAR CK_PR_UNARY_S(inc, add, char, char) #endif /* CK_F_PR_INC_CHAR */ #ifndef CK_F_PR_INC_CHAR_ZERO #define CK_F_PR_INC_CHAR_ZERO CK_PR_UNARY_Z_S(inc, char, char, +, -1) #endif /* CK_F_PR_INC_CHAR_ZERO */ #ifndef CK_F_PR_DEC_CHAR #define CK_F_PR_DEC_CHAR CK_PR_UNARY_S(dec, sub, char, char) #endif /* CK_F_PR_DEC_CHAR */ #ifndef CK_F_PR_DEC_CHAR_ZERO #define CK_F_PR_DEC_CHAR_ZERO CK_PR_UNARY_Z_S(dec, char, char, -, 1) #endif /* CK_F_PR_DEC_CHAR_ZERO */ #endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */ #if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE) #ifndef CK_F_PR_INC_INT #define CK_F_PR_INC_INT CK_PR_UNARY_S(inc, add, int, int) #endif /* CK_F_PR_INC_INT */ #ifndef CK_F_PR_INC_INT_ZERO #define CK_F_PR_INC_INT_ZERO CK_PR_UNARY_Z_S(inc, int, int, +, -1) #endif /* CK_F_PR_INC_INT_ZERO */ #ifndef CK_F_PR_DEC_INT #define CK_F_PR_DEC_INT CK_PR_UNARY_S(dec, sub, int, int) #endif /* CK_F_PR_DEC_INT */ #ifndef CK_F_PR_DEC_INT_ZERO #define CK_F_PR_DEC_INT_ZERO CK_PR_UNARY_Z_S(dec, int, int, -, 1) #endif /* CK_F_PR_DEC_INT_ZERO */ #endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */ #if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \ !defined(CK_PR_DISABLE_DOUBLE) #ifndef CK_F_PR_INC_DOUBLE #define CK_F_PR_INC_DOUBLE CK_PR_UNARY_S(inc, add, double, double) #endif /* CK_F_PR_INC_DOUBLE */ #ifndef CK_F_PR_DEC_DOUBLE #define CK_F_PR_DEC_DOUBLE CK_PR_UNARY_S(dec, sub, double, double) #endif /* CK_F_PR_DEC_DOUBLE */ #endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */ #if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE) #ifndef CK_F_PR_INC_UINT #define CK_F_PR_INC_UINT CK_PR_UNARY_S(inc, add, uint, unsigned int) #endif /* CK_F_PR_INC_UINT */ #ifndef CK_F_PR_INC_UINT_ZERO #define CK_F_PR_INC_UINT_ZERO CK_PR_UNARY_Z_S(inc, uint, unsigned int, +, UINT_MAX) #endif /* CK_F_PR_INC_UINT_ZERO */ #ifndef CK_F_PR_DEC_UINT #define CK_F_PR_DEC_UINT CK_PR_UNARY_S(dec, sub, uint, unsigned int) #endif /* CK_F_PR_DEC_UINT */ #ifndef CK_F_PR_DEC_UINT_ZERO #define CK_F_PR_DEC_UINT_ZERO CK_PR_UNARY_Z_S(dec, uint, unsigned int, -, 1) 
#endif /* CK_F_PR_DEC_UINT_ZERO */ #endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */ #if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE) #ifndef CK_F_PR_INC_PTR #define CK_F_PR_INC_PTR CK_PR_UNARY(inc, add, ptr, void, uintptr_t) #endif /* CK_F_PR_INC_PTR */ #ifndef CK_F_PR_INC_PTR_ZERO #define CK_F_PR_INC_PTR_ZERO CK_PR_UNARY_Z(inc, ptr, void, uintptr_t, +, void *, UINT_MAX) #endif /* CK_F_PR_INC_PTR_ZERO */ #ifndef CK_F_PR_DEC_PTR #define CK_F_PR_DEC_PTR CK_PR_UNARY(dec, sub, ptr, void, uintptr_t) #endif /* CK_F_PR_DEC_PTR */ #ifndef CK_F_PR_DEC_PTR_ZERO #define CK_F_PR_DEC_PTR_ZERO CK_PR_UNARY_Z(dec, ptr, void, uintptr_t, -, void *, 1) #endif /* CK_F_PR_DEC_PTR_ZERO */ #endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */ #if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE) #ifndef CK_F_PR_INC_64 #define CK_F_PR_INC_64 CK_PR_UNARY_S(inc, add, 64, uint64_t) #endif /* CK_F_PR_INC_64 */ #ifndef CK_F_PR_INC_64_ZERO #define CK_F_PR_INC_64_ZERO CK_PR_UNARY_Z_S(inc, 64, uint64_t, +, UINT64_MAX) #endif /* CK_F_PR_INC_64_ZERO */ #ifndef CK_F_PR_DEC_64 #define CK_F_PR_DEC_64 CK_PR_UNARY_S(dec, sub, 64, uint64_t) #endif /* CK_F_PR_DEC_64 */ #ifndef CK_F_PR_DEC_64_ZERO #define CK_F_PR_DEC_64_ZERO CK_PR_UNARY_Z_S(dec, 64, uint64_t, -, 1) #endif /* CK_F_PR_DEC_64_ZERO */ #endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */ #if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE) #ifndef CK_F_PR_INC_32 #define CK_F_PR_INC_32 CK_PR_UNARY_S(inc, add, 32, uint32_t) #endif /* CK_F_PR_INC_32 */ #ifndef CK_F_PR_INC_32_ZERO #define CK_F_PR_INC_32_ZERO CK_PR_UNARY_Z_S(inc, 32, uint32_t, +, UINT32_MAX) #endif /* CK_F_PR_INC_32_ZERO */ #ifndef CK_F_PR_DEC_32 #define CK_F_PR_DEC_32 CK_PR_UNARY_S(dec, sub, 32, uint32_t) #endif /* CK_F_PR_DEC_32 */ #ifndef CK_F_PR_DEC_32_ZERO #define CK_F_PR_DEC_32_ZERO CK_PR_UNARY_Z_S(dec, 32, uint32_t, -, 1) #endif /* CK_F_PR_DEC_32_ZERO */ #endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */ #if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE) #ifndef CK_F_PR_INC_16 #define CK_F_PR_INC_16 CK_PR_UNARY_S(inc, add, 16, uint16_t) #endif /* CK_F_PR_INC_16 */ #ifndef CK_F_PR_INC_16_ZERO #define CK_F_PR_INC_16_ZERO CK_PR_UNARY_Z_S(inc, 16, uint16_t, +, UINT16_MAX) #endif /* CK_F_PR_INC_16_ZERO */ #ifndef CK_F_PR_DEC_16 #define CK_F_PR_DEC_16 CK_PR_UNARY_S(dec, sub, 16, uint16_t) #endif /* CK_F_PR_DEC_16 */ #ifndef CK_F_PR_DEC_16_ZERO #define CK_F_PR_DEC_16_ZERO CK_PR_UNARY_Z_S(dec, 16, uint16_t, -, 1) #endif /* CK_F_PR_DEC_16_ZERO */ #endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */ #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE) #ifndef CK_F_PR_INC_8 #define CK_F_PR_INC_8 CK_PR_UNARY_S(inc, add, 8, uint8_t) #endif /* CK_F_PR_INC_8 */ #ifndef CK_F_PR_INC_8_ZERO #define CK_F_PR_INC_8_ZERO CK_PR_UNARY_Z_S(inc, 8, uint8_t, +, UINT8_MAX) #endif /* CK_F_PR_INC_8_ZERO */ #ifndef CK_F_PR_DEC_8 #define CK_F_PR_DEC_8 CK_PR_UNARY_S(dec, sub, 8, uint8_t) #endif /* CK_F_PR_DEC_8 */ #ifndef CK_F_PR_DEC_8_ZERO #define CK_F_PR_DEC_8_ZERO CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1) #endif /* CK_F_PR_DEC_8_ZERO */ #endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */ #undef CK_PR_UNARY_Z_S #undef CK_PR_UNARY_S #undef CK_PR_UNARY_Z #undef CK_PR_UNARY #define CK_PR_N(K, S, M, T, P, C) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target) \ { \ T previous; \ C punt; \ punt = (C)ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, \ (C)previous, \ (C)(P previous), \ &previous) == false) \ ck_pr_stall(); \ \ return; \ } 
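/*
 * Illustrative sketch, not part of the diff: when no native primitive is
 * available, the CK_PR_BIN/CK_PR_UNARY/CK_PR_N generators fall back to a
 * load plus compare-and-swap retry loop.  Hand-written, ck_pr_add_uint()
 * behaves roughly as follows.
 */
#include <ck_pr.h>

static void
add_uint_equivalent(unsigned int *target, unsigned int delta)
{
        unsigned int previous = ck_pr_load_uint(target);

        /* Retry until the CAS observes the value the addition was computed against. */
        while (ck_pr_cas_uint_value(target, previous,
            previous + delta, &previous) == false)
                ck_pr_stall();
}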
#define CK_PR_N_Z(S, M, T, C) \ CK_CC_INLINE static void \ ck_pr_neg_##S##_zero(M *target, bool *zero) \ { \ T previous; \ C punt; \ punt = (C)ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, \ (C)previous, \ (C)(-previous), \ &previous) == false) \ ck_pr_stall(); \ \ *zero = previous == 0; \ return; \ } #define CK_PR_N_S(K, S, M, P) CK_PR_N(K, S, M, M, P, M) #define CK_PR_N_Z_S(S, M) CK_PR_N_Z(S, M, M, M) #if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE) #ifndef CK_F_PR_NOT_CHAR #define CK_F_PR_NOT_CHAR CK_PR_N_S(not, char, char, ~) #endif /* CK_F_PR_NOT_CHAR */ #ifndef CK_F_PR_NEG_CHAR #define CK_F_PR_NEG_CHAR CK_PR_N_S(neg, char, char, -) #endif /* CK_F_PR_NEG_CHAR */ #ifndef CK_F_PR_NEG_CHAR_ZERO #define CK_F_PR_NEG_CHAR_ZERO CK_PR_N_Z_S(char, char) #endif /* CK_F_PR_NEG_CHAR_ZERO */ #endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */ #if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE) #ifndef CK_F_PR_NOT_INT #define CK_F_PR_NOT_INT CK_PR_N_S(not, int, int, ~) #endif /* CK_F_PR_NOT_INT */ #ifndef CK_F_PR_NEG_INT #define CK_F_PR_NEG_INT CK_PR_N_S(neg, int, int, -) #endif /* CK_F_PR_NEG_INT */ #ifndef CK_F_PR_NEG_INT_ZERO #define CK_F_PR_NEG_INT_ZERO CK_PR_N_Z_S(int, int) #endif /* CK_F_PR_NEG_INT_ZERO */ #endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */ #if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \ !defined(CK_PR_DISABLE_DOUBLE) #ifndef CK_F_PR_NEG_DOUBLE #define CK_F_PR_NEG_DOUBLE CK_PR_N_S(neg, double, double, -) #endif /* CK_F_PR_NEG_DOUBLE */ #endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */ #if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE) #ifndef CK_F_PR_NOT_UINT #define CK_F_PR_NOT_UINT CK_PR_N_S(not, uint, unsigned int, ~) #endif /* CK_F_PR_NOT_UINT */ #ifndef CK_F_PR_NEG_UINT #define CK_F_PR_NEG_UINT CK_PR_N_S(neg, uint, unsigned int, -) #endif /* CK_F_PR_NEG_UINT */ #ifndef CK_F_PR_NEG_UINT_ZERO #define CK_F_PR_NEG_UINT_ZERO CK_PR_N_Z_S(uint, unsigned int) #endif /* CK_F_PR_NEG_UINT_ZERO */ #endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */ #if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE) #ifndef CK_F_PR_NOT_PTR #define CK_F_PR_NOT_PTR CK_PR_N(not, ptr, void, uintptr_t, ~, void *) #endif /* CK_F_PR_NOT_PTR */ #ifndef CK_F_PR_NEG_PTR #define CK_F_PR_NEG_PTR CK_PR_N(neg, ptr, void, uintptr_t, -, void *) #endif /* CK_F_PR_NEG_PTR */ #ifndef CK_F_PR_NEG_PTR_ZERO #define CK_F_PR_NEG_PTR_ZERO CK_PR_N_Z(ptr, void, uintptr_t, void *) #endif /* CK_F_PR_NEG_PTR_ZERO */ #endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */ #if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE) #ifndef CK_F_PR_NOT_64 #define CK_F_PR_NOT_64 CK_PR_N_S(not, 64, uint64_t, ~) #endif /* CK_F_PR_NOT_64 */ #ifndef CK_F_PR_NEG_64 #define CK_F_PR_NEG_64 CK_PR_N_S(neg, 64, uint64_t, -) #endif /* CK_F_PR_NEG_64 */ #ifndef CK_F_PR_NEG_64_ZERO #define CK_F_PR_NEG_64_ZERO CK_PR_N_Z_S(64, uint64_t) #endif /* CK_F_PR_NEG_64_ZERO */ #endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */ #if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE) #ifndef CK_F_PR_NOT_32 #define CK_F_PR_NOT_32 CK_PR_N_S(not, 32, uint32_t, ~) #endif /* CK_F_PR_NOT_32 */ #ifndef CK_F_PR_NEG_32 #define CK_F_PR_NEG_32 CK_PR_N_S(neg, 32, uint32_t, -) #endif /* CK_F_PR_NEG_32 */ #ifndef CK_F_PR_NEG_32_ZERO #define CK_F_PR_NEG_32_ZERO CK_PR_N_Z_S(32, uint32_t) #endif /* CK_F_PR_NEG_32_ZERO */ #endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */ #if 
defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE) #ifndef CK_F_PR_NOT_16 #define CK_F_PR_NOT_16 CK_PR_N_S(not, 16, uint16_t, ~) #endif /* CK_F_PR_NOT_16 */ #ifndef CK_F_PR_NEG_16 #define CK_F_PR_NEG_16 CK_PR_N_S(neg, 16, uint16_t, -) #endif /* CK_F_PR_NEG_16 */ #ifndef CK_F_PR_NEG_16_ZERO #define CK_F_PR_NEG_16_ZERO CK_PR_N_Z_S(16, uint16_t) #endif /* CK_F_PR_NEG_16_ZERO */ #endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */ #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE) #ifndef CK_F_PR_NOT_8 #define CK_F_PR_NOT_8 CK_PR_N_S(not, 8, uint8_t, ~) #endif /* CK_F_PR_NOT_8 */ #ifndef CK_F_PR_NEG_8 #define CK_F_PR_NEG_8 CK_PR_N_S(neg, 8, uint8_t, -) #endif /* CK_F_PR_NEG_8 */ #ifndef CK_F_PR_NEG_8_ZERO #define CK_F_PR_NEG_8_ZERO CK_PR_N_Z_S(8, uint8_t) #endif /* CK_F_PR_NEG_8_ZERO */ #endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */ #undef CK_PR_N_Z_S #undef CK_PR_N_S #undef CK_PR_N_Z #undef CK_PR_N #define CK_PR_FAA(S, M, T, C) \ CK_CC_INLINE static C \ ck_pr_faa_##S(M *target, T delta) \ { \ T previous; \ C punt; \ punt = (C)ck_pr_md_load_##S(target); \ previous = (T)punt; \ while (ck_pr_cas_##S##_value(target, \ (C)previous, \ (C)(previous + delta), \ &previous) == false) \ ck_pr_stall(); \ \ return ((C)previous); \ } #define CK_PR_FAS(S, M, C) \ CK_CC_INLINE static C \ ck_pr_fas_##S(M *target, C update) \ { \ C previous; \ previous = ck_pr_md_load_##S(target); \ while (ck_pr_cas_##S##_value(target, \ previous, \ update, \ &previous) == false) \ ck_pr_stall(); \ \ return (previous); \ } #define CK_PR_FAA_S(S, M) CK_PR_FAA(S, M, M, M) #define CK_PR_FAS_S(S, M) CK_PR_FAS(S, M, M) #if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE) #ifndef CK_F_PR_FAA_CHAR #define CK_F_PR_FAA_CHAR CK_PR_FAA_S(char, char) #endif /* CK_F_PR_FAA_CHAR */ #ifndef CK_F_PR_FAS_CHAR #define CK_F_PR_FAS_CHAR CK_PR_FAS_S(char, char) #endif /* CK_F_PR_FAS_CHAR */ #endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */ #if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE) #ifndef CK_F_PR_FAA_INT #define CK_F_PR_FAA_INT CK_PR_FAA_S(int, int) #endif /* CK_F_PR_FAA_INT */ #ifndef CK_F_PR_FAS_INT #define CK_F_PR_FAS_INT CK_PR_FAS_S(int, int) #endif /* CK_F_PR_FAS_INT */ #endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */ #if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \ !defined(CK_PR_DISABLE_DOUBLE) #ifndef CK_F_PR_FAA_DOUBLE #define CK_F_PR_FAA_DOUBLE CK_PR_FAA_S(double, double) #endif /* CK_F_PR_FAA_DOUBLE */ #ifndef CK_F_PR_FAS_DOUBLE #define CK_F_PR_FAS_DOUBLE CK_PR_FAS_S(double, double) #endif /* CK_F_PR_FAS_DOUBLE */ #endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */ #if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE) #ifndef CK_F_PR_FAA_UINT #define CK_F_PR_FAA_UINT CK_PR_FAA_S(uint, unsigned int) #endif /* CK_F_PR_FAA_UINT */ #ifndef CK_F_PR_FAS_UINT #define CK_F_PR_FAS_UINT CK_PR_FAS_S(uint, unsigned int) #endif /* CK_F_PR_FAS_UINT */ #endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */ #if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE) #ifndef CK_F_PR_FAA_PTR #define CK_F_PR_FAA_PTR CK_PR_FAA(ptr, void, uintptr_t, void *) #endif /* CK_F_PR_FAA_PTR */ #ifndef CK_F_PR_FAS_PTR #define CK_F_PR_FAS_PTR CK_PR_FAS(ptr, void, void *) #endif /* CK_F_PR_FAS_PTR */ #endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */ #if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE) #ifndef CK_F_PR_FAA_64 #define CK_F_PR_FAA_64 CK_PR_FAA_S(64, uint64_t) #endif /* CK_F_PR_FAA_64 */ #ifndef 
CK_F_PR_FAS_64 #define CK_F_PR_FAS_64 CK_PR_FAS_S(64, uint64_t) #endif /* CK_F_PR_FAS_64 */ #endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */ #if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE) #ifndef CK_F_PR_FAA_32 #define CK_F_PR_FAA_32 CK_PR_FAA_S(32, uint32_t) #endif /* CK_F_PR_FAA_32 */ #ifndef CK_F_PR_FAS_32 #define CK_F_PR_FAS_32 CK_PR_FAS_S(32, uint32_t) #endif /* CK_F_PR_FAS_32 */ #endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */ #if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE) #ifndef CK_F_PR_FAA_16 #define CK_F_PR_FAA_16 CK_PR_FAA_S(16, uint16_t) #endif /* CK_F_PR_FAA_16 */ #ifndef CK_F_PR_FAS_16 #define CK_F_PR_FAS_16 CK_PR_FAS_S(16, uint16_t) #endif /* CK_F_PR_FAS_16 */ #endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */ #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE) #ifndef CK_F_PR_FAA_8 #define CK_F_PR_FAA_8 CK_PR_FAA_S(8, uint8_t) #endif /* CK_F_PR_FAA_8 */ #ifndef CK_F_PR_FAS_8 #define CK_F_PR_FAS_8 CK_PR_FAS_S(8, uint8_t) #endif /* CK_F_PR_FAS_8 */ #endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */ #undef CK_PR_FAA_S #undef CK_PR_FAS_S #undef CK_PR_FAA #undef CK_PR_FAS #endif /* CK_PR_H */ Index: head/sys/contrib/ck/include/ck_queue.h =================================================================== --- head/sys/contrib/ck/include/ck_queue.h (revision 331897) +++ head/sys/contrib/ck/include/ck_queue.h (revision 331898) @@ -1,428 +1,428 @@ /* * Copyright 2012-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
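/*
 * Illustrative sketch, not part of the diff: ck_pr_faa_uint(), generated by
 * CK_PR_FAA above, atomically adds a delta and returns the previous value,
 * which makes it a natural ticket/ID allocator.  The counter name is
 * hypothetical.
 */
#include <ck_pr.h>

static unsigned int next_id;

static unsigned int
allocate_id(void)
{
        /* Returns the pre-increment value, so IDs start at 0. */
        return ck_pr_faa_uint(&next_id, 1);
}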
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 * $FreeBSD$ */ #ifndef CK_QUEUE_H #define CK_QUEUE_H #include /* * This file defines three types of data structures: singly-linked lists, * singly-linked tail queues and lists. * * A singly-linked list is headed by a single forward pointer. The elements * are singly linked for minimum space and pointer manipulation overhead at * the expense of O(n) removal for arbitrary elements. New elements can be * added to the list after an existing element or at the head of the list. * Elements being removed from the head of the list should use the explicit * macro for this purpose for optimum efficiency. A singly-linked list may * only be traversed in the forward direction. Singly-linked lists are ideal * for applications with large datasets and few or no removals or for * implementing a LIFO queue. * * A singly-linked tail queue is headed by a pair of pointers, one to the * head of the list and the other to the tail of the list. The elements are * singly linked for minimum space and pointer manipulation overhead at the * expense of O(n) removal for arbitrary elements. New elements can be added * to the list after an existing element, at the head of the list, or at the * end of the list. Elements being removed from the head of the tail queue * should use the explicit macro for this purpose for optimum efficiency. * A singly-linked tail queue may only be traversed in the forward direction. * Singly-linked tail queues are ideal for applications with large datasets * and few or no removals or for implementing a FIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent * modifications to the list. Writers to these lists must, on the other hand, * implement writer-side synchronization. The _SWAP operations are not atomic. * This facility is currently unsupported on architectures such as the Alpha * which require load-depend memory fences. * * CK_SLIST CK_LIST CK_STAILQ * _HEAD + + + * _HEAD_INITIALIZER + + + * _ENTRY + + + * _INIT + + + * _EMPTY + + + * _FIRST + + + * _NEXT + + + * _FOREACH + + + * _FOREACH_SAFE + + + * _INSERT_HEAD + + + * _INSERT_BEFORE - + - * _INSERT_AFTER + + + * _INSERT_TAIL - - + * _REMOVE_AFTER + - + * _REMOVE_HEAD + - + * _REMOVE + + + * _SWAP + + + * _MOVE + + + */ /* * Singly-linked List declarations. 
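/*
 * Illustrative sketch, not part of the diff: per the comment block above,
 * CK_SLIST_FOREACH may run concurrently with a single serialized writer.
 * The element type and list names below are hypothetical.
 */
#include <ck_queue.h>

struct node {
        int value;
        CK_SLIST_ENTRY(node) link;
};

static CK_SLIST_HEAD(node_list, node) head = CK_SLIST_HEAD_INITIALIZER(head);

/* Writer side: callers must serialize against other writers. */
static void
node_insert(struct node *n)
{
        CK_SLIST_INSERT_HEAD(&head, n, link);
}

/* Reader side: safe against a concurrent node_insert(). */
static int
node_sum(void)
{
        struct node *n;
        int sum = 0;

        CK_SLIST_FOREACH(n, &head, link)
                sum += n->value;

        return sum;
}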
*/ #define CK_SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define CK_SLIST_HEAD_INITIALIZER(head) \ { NULL } #define CK_SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. */ #define CK_SLIST_EMPTY(head) \ (ck_pr_load_ptr(&(head)->slh_first) == NULL) #define CK_SLIST_FIRST(head) \ (ck_pr_load_ptr(&(head)->slh_first)) #define CK_SLIST_NEXT(elm, field) \ ck_pr_load_ptr(&((elm)->field.sle_next)) #define CK_SLIST_FOREACH(var, head, field) \ for ((var) = CK_SLIST_FIRST((head)); \ (var) && (ck_pr_fence_load(), 1); \ (var) = CK_SLIST_NEXT((var), field)) #define CK_SLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = CK_SLIST_FIRST(head); \ (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1);\ (var) = (tvar)) #define CK_SLIST_FOREACH_PREVPTR(var, varp, head, field) \ for ((varp) = &(head)->slh_first; \ ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1); \ (varp) = &(var)->field.sle_next) #define CK_SLIST_INIT(head) do { \ ck_pr_store_ptr(&(head)->slh_first, NULL); \ ck_pr_fence_store(); \ } while (0) #define CK_SLIST_INSERT_AFTER(a, b, field) do { \ (b)->field.sle_next = (a)->field.sle_next; \ ck_pr_fence_store(); \ ck_pr_store_ptr(&(a)->field.sle_next, b); \ } while (0) #define CK_SLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ ck_pr_fence_store(); \ ck_pr_store_ptr(&(head)->slh_first, elm); \ } while (0) #define CK_SLIST_REMOVE_AFTER(elm, field) do { \ ck_pr_store_ptr(&(elm)->field.sle_next, \ (elm)->field.sle_next->field.sle_next); \ } while (0) #define CK_SLIST_REMOVE(head, elm, type, field) do { \ if ((head)->slh_first == (elm)) { \ CK_SLIST_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->slh_first; \ while (curelm->field.sle_next != (elm)) \ curelm = curelm->field.sle_next; \ CK_SLIST_REMOVE_AFTER(curelm, field); \ } \ } while (0) #define CK_SLIST_REMOVE_HEAD(head, field) do { \ ck_pr_store_ptr(&(head)->slh_first, \ (head)->slh_first->field.sle_next); \ } while (0) #define CK_SLIST_MOVE(head1, head2, field) do { \ ck_pr_store_ptr(&(head1)->slh_first, (head2)->slh_first); \ } while (0) /* * This operation is not applied atomically. */ #define CK_SLIST_SWAP(a, b, type) do { \ struct type *swap_first = (a)->slh_first; \ (a)->slh_first = (b)->slh_first; \ (b)->slh_first = swap_first; \ } while (0) /* * Singly-linked Tail queue declarations. */ #define CK_STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first;/* first element */ \ struct type **stqh_last;/* addr of last next element */ \ } #define CK_STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define CK_STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. 
*/ #define CK_STAILQ_CONCAT(head1, head2) do { \ - if ((head2)->stqh_first == NULL) { \ + if ((head2)->stqh_first != NULL) { \ ck_pr_store_ptr((head1)->stqh_last, (head2)->stqh_first); \ ck_pr_fence_store(); \ (head1)->stqh_last = (head2)->stqh_last; \ CK_STAILQ_INIT((head2)); \ } \ } while (0) #define CK_STAILQ_EMPTY(head) (ck_pr_load_ptr(&(head)->stqh_first) == NULL) #define CK_STAILQ_FIRST(head) (ck_pr_load_ptr(&(head)->stqh_first)) #define CK_STAILQ_FOREACH(var, head, field) \ for((var) = CK_STAILQ_FIRST((head)); \ (var) && (ck_pr_fence_load(), 1); \ (var) = CK_STAILQ_NEXT((var), field)) #define CK_STAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = CK_STAILQ_FIRST((head)); \ (var) && (ck_pr_fence_load(), (tvar) = \ CK_STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define CK_STAILQ_INIT(head) do { \ ck_pr_store_ptr(&(head)->stqh_first, NULL); \ ck_pr_fence_store(); \ (head)->stqh_last = &(head)->stqh_first; \ } while (0) #define CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ (elm)->field.stqe_next = (tqelm)->field.stqe_next; \ ck_pr_fence_store(); \ ck_pr_store_ptr(&(tqelm)->field.stqe_next, elm); \ if ((elm)->field.stqe_next == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (0) #define CK_STAILQ_INSERT_HEAD(head, elm, field) do { \ (elm)->field.stqe_next = (head)->stqh_first; \ ck_pr_fence_store(); \ ck_pr_store_ptr(&(head)->stqh_first, elm); \ if ((elm)->field.stqe_next == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (0) #define CK_STAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.stqe_next = NULL; \ ck_pr_fence_store(); \ ck_pr_store_ptr((head)->stqh_last, (elm)); \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (0) #define CK_STAILQ_NEXT(elm, field) \ (ck_pr_load_ptr(&(elm)->field.stqe_next)) #define CK_STAILQ_REMOVE(head, elm, type, field) do { \ if ((head)->stqh_first == (elm)) { \ CK_STAILQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->stqh_first; \ while (curelm->field.stqe_next != (elm)) \ curelm = curelm->field.stqe_next; \ CK_STAILQ_REMOVE_AFTER(head, curelm, field); \ } \ } while (0) #define CK_STAILQ_REMOVE_AFTER(head, elm, field) do { \ ck_pr_store_ptr(&(elm)->field.stqe_next, \ (elm)->field.stqe_next->field.stqe_next); \ if ((elm)->field.stqe_next == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (0) #define CK_STAILQ_REMOVE_HEAD(head, field) do { \ ck_pr_store_ptr(&(head)->stqh_first, \ (head)->stqh_first->field.stqe_next); \ if ((head)->stqh_first == NULL) \ (head)->stqh_last = &(head)->stqh_first; \ } while (0) #define CK_STAILQ_MOVE(head1, head2, field) do { \ ck_pr_store_ptr(&(head1)->stqh_first, (head2)->stqh_first); \ (head1)->stqh_last = (head2)->stqh_last; \ if ((head2)->stqh_last == &(head2)->stqh_first) \ (head1)->stqh_last = &(head1)->stqh_first; \ } while (0) /* * This operation is not applied atomically. */ #define CK_STAILQ_SWAP(head1, head2, type) do { \ struct type *swap_first = CK_STAILQ_FIRST(head1); \ struct type **swap_last = (head1)->stqh_last; \ CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2); \ (head1)->stqh_last = (head2)->stqh_last; \ CK_STAILQ_FIRST(head2) = swap_first; \ (head2)->stqh_last = swap_last; \ if (CK_STAILQ_EMPTY(head1)) \ (head1)->stqh_last = &(head1)->stqh_first; \ if (CK_STAILQ_EMPTY(head2)) \ (head2)->stqh_last = &(head2)->stqh_first; \ } while (0) /* * List declarations. 
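/*
 * Illustrative sketch, not part of the diff: with the corrected condition
 * above, CK_STAILQ_CONCAT appends head2 onto head1 only when head2 is
 * non-empty, and leaves head2 reinitialized to empty afterwards.  Types and
 * names are hypothetical; both queues are assumed to have been initialized.
 */
#include <ck_queue.h>

struct job {
        int id;
        CK_STAILQ_ENTRY(job) link;
};

CK_STAILQ_HEAD(job_queue, job);

/* Writer side: move every queued job from src onto the tail of dst. */
static void
job_queue_drain_into(struct job_queue *dst, struct job_queue *src)
{
        CK_STAILQ_CONCAT(dst, src);     /* src is left empty */
}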
*/ #define CK_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define CK_LIST_HEAD_INITIALIZER(head) \ { NULL } #define CK_LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } #define CK_LIST_FIRST(head) ck_pr_load_ptr(&(head)->lh_first) #define CK_LIST_EMPTY(head) (CK_LIST_FIRST(head) == NULL) #define CK_LIST_NEXT(elm, field) ck_pr_load_ptr(&(elm)->field.le_next) #define CK_LIST_FOREACH(var, head, field) \ for ((var) = CK_LIST_FIRST((head)); \ (var) && (ck_pr_fence_load(), 1); \ (var) = CK_LIST_NEXT((var), field)) #define CK_LIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = CK_LIST_FIRST((head)); \ (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1);\ (var) = (tvar)) #define CK_LIST_INIT(head) do { \ ck_pr_store_ptr(&(head)->lh_first, NULL); \ ck_pr_fence_store(); \ } while (0) #define CK_LIST_INSERT_AFTER(listelm, elm, field) do { \ (elm)->field.le_next = (listelm)->field.le_next; \ (elm)->field.le_prev = &(listelm)->field.le_next; \ ck_pr_fence_store(); \ if ((listelm)->field.le_next != NULL) \ (listelm)->field.le_next->field.le_prev = &(elm)->field.le_next;\ ck_pr_store_ptr(&(listelm)->field.le_next, elm); \ } while (0) #define CK_LIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ ck_pr_fence_store(); \ ck_pr_store_ptr((listelm)->field.le_prev, (elm)); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (0) #define CK_LIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.le_next = (head)->lh_first; \ ck_pr_fence_store(); \ if ((elm)->field.le_next != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next; \ ck_pr_store_ptr(&(head)->lh_first, elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (0) #define CK_LIST_REMOVE(elm, field) do { \ ck_pr_store_ptr((elm)->field.le_prev, (elm)->field.le_next); \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = (elm)->field.le_prev; \ } while (0) #define CK_LIST_MOVE(head1, head2, field) do { \ ck_pr_store_ptr(&(head1)->lh_first, (head2)->lh_first); \ if ((head1)->lh_first != NULL) \ (head1)->lh_first->field.le_prev = &(head1)->lh_first; \ } while (0) /* * This operation is not applied atomically. */ #define CK_LIST_SWAP(head1, head2, type, field) do { \ struct type *swap_tmp = (head1)->lh_first; \ (head1)->lh_first = (head2)->lh_first; \ (head2)->lh_first = swap_tmp; \ if ((swap_tmp = (head1)->lh_first) != NULL) \ swap_tmp->field.le_prev = &(head1)->lh_first; \ if ((swap_tmp = (head2)->lh_first) != NULL) \ swap_tmp->field.le_prev = &(head2)->lh_first; \ } while (0) #endif /* CK_QUEUE_H */ Index: head/sys/contrib/ck/include/ck_ring.h =================================================================== --- head/sys/contrib/ck/include/ck_ring.h (revision 331897) +++ head/sys/contrib/ck/include/ck_ring.h (revision 331898) @@ -1,656 +1,687 @@ /* * Copyright 2009-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_RING_H #define CK_RING_H #include #include #include #include #include /* * Concurrent ring buffer. */ struct ck_ring { unsigned int c_head; char pad[CK_MD_CACHELINE - sizeof(unsigned int)]; unsigned int p_tail; unsigned int p_head; char _pad[CK_MD_CACHELINE - sizeof(unsigned int) * 2]; unsigned int size; unsigned int mask; }; typedef struct ck_ring ck_ring_t; struct ck_ring_buffer { void *value; }; typedef struct ck_ring_buffer ck_ring_buffer_t; CK_CC_INLINE static unsigned int ck_ring_size(const struct ck_ring *ring) { unsigned int c, p; c = ck_pr_load_uint(&ring->c_head); p = ck_pr_load_uint(&ring->p_tail); return (p - c) & ring->mask; } CK_CC_INLINE static unsigned int ck_ring_capacity(const struct ck_ring *ring) { return ring->size; } CK_CC_INLINE static void ck_ring_init(struct ck_ring *ring, unsigned int size) { ring->size = size; ring->mask = size - 1; ring->p_tail = 0; ring->p_head = 0; ring->c_head = 0; return; } /* * The _ck_ring_* namespace is internal only and must not used externally. */ CK_CC_FORCE_INLINE static bool _ck_ring_enqueue_sp(struct ck_ring *ring, void *CK_CC_RESTRICT buffer, const void *CK_CC_RESTRICT entry, unsigned int ts, unsigned int *size) { const unsigned int mask = ring->mask; unsigned int consumer, producer, delta; consumer = ck_pr_load_uint(&ring->c_head); producer = ring->p_tail; delta = producer + 1; if (size != NULL) *size = (producer - consumer) & mask; if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask))) return false; buffer = (char *)buffer + ts * (producer & mask); memcpy(buffer, entry, ts); /* * Make sure to update slot value before indicating * that the slot is available for consumption. */ ck_pr_fence_store(); ck_pr_store_uint(&ring->p_tail, delta); return true; } CK_CC_FORCE_INLINE static bool _ck_ring_enqueue_sp_size(struct ck_ring *ring, void *CK_CC_RESTRICT buffer, const void *CK_CC_RESTRICT entry, unsigned int ts, unsigned int *size) { unsigned int sz; bool r; r = _ck_ring_enqueue_sp(ring, buffer, entry, ts, &sz); *size = sz; return r; } CK_CC_FORCE_INLINE static bool _ck_ring_dequeue_sc(struct ck_ring *ring, const void *CK_CC_RESTRICT buffer, void *CK_CC_RESTRICT target, unsigned int size) { const unsigned int mask = ring->mask; unsigned int consumer, producer; consumer = ring->c_head; producer = ck_pr_load_uint(&ring->p_tail); if (CK_CC_UNLIKELY(consumer == producer)) return false; /* * Make sure to serialize with respect to our snapshot * of the producer counter. 
*/ ck_pr_fence_load(); buffer = (const char *)buffer + size * (consumer & mask); memcpy(target, buffer, size); /* * Make sure copy is completed with respect to consumer * update. */ ck_pr_fence_store(); ck_pr_store_uint(&ring->c_head, consumer + 1); return true; } CK_CC_FORCE_INLINE static bool _ck_ring_enqueue_mp(struct ck_ring *ring, void *buffer, const void *entry, unsigned int ts, unsigned int *size) { const unsigned int mask = ring->mask; unsigned int producer, consumer, delta; bool r = true; producer = ck_pr_load_uint(&ring->p_head); - do { + for (;;) { /* - * The snapshot of producer must be up to date with - * respect to consumer. + * The snapshot of producer must be up to date with respect to + * consumer. */ ck_pr_fence_load(); consumer = ck_pr_load_uint(&ring->c_head); delta = producer + 1; - if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask))) { - r = false; - goto leave; + + /* + * Only try to CAS if the producer is not clearly stale (not + * less than consumer) and the buffer is definitely not full. + */ + if (CK_CC_LIKELY((producer - consumer) < mask)) { + if (ck_pr_cas_uint_value(&ring->p_head, + producer, delta, &producer) == true) { + break; + } + } else { + unsigned int new_producer; + + /* + * Slow path. Either the buffer is full or we have a + * stale snapshot of p_head. Execute a second read of + * p_read that must be ordered wrt the snapshot of + * c_head. + */ + ck_pr_fence_load(); + new_producer = ck_pr_load_uint(&ring->p_head); + + /* + * Only fail if we haven't made forward progress in + * production: the buffer must have been full when we + * read new_producer (or we wrapped around UINT_MAX + * during this iteration). + */ + if (producer == new_producer) { + r = false; + goto leave; + } + + /* + * p_head advanced during this iteration. Try again. + */ + producer = new_producer; } - } while (ck_pr_cas_uint_value(&ring->p_head, - producer, - delta, - &producer) == false); + } buffer = (char *)buffer + ts * (producer & mask); memcpy(buffer, entry, ts); /* * Wait until all concurrent producers have completed writing * their data into the ring buffer. */ while (ck_pr_load_uint(&ring->p_tail) != producer) ck_pr_stall(); /* * Ensure that copy is completed before updating shared producer * counter. 
*/ ck_pr_fence_store(); ck_pr_store_uint(&ring->p_tail, delta); leave: if (size != NULL) *size = (producer - consumer) & mask; return r; } CK_CC_FORCE_INLINE static bool _ck_ring_enqueue_mp_size(struct ck_ring *ring, void *buffer, const void *entry, unsigned int ts, unsigned int *size) { unsigned int sz; bool r; r = _ck_ring_enqueue_mp(ring, buffer, entry, ts, &sz); *size = sz; return r; } CK_CC_FORCE_INLINE static bool _ck_ring_trydequeue_mc(struct ck_ring *ring, const void *buffer, void *data, unsigned int size) { const unsigned int mask = ring->mask; unsigned int consumer, producer; consumer = ck_pr_load_uint(&ring->c_head); ck_pr_fence_load(); producer = ck_pr_load_uint(&ring->p_tail); if (CK_CC_UNLIKELY(consumer == producer)) return false; ck_pr_fence_load(); buffer = (const char *)buffer + size * (consumer & mask); memcpy(data, buffer, size); ck_pr_fence_store_atomic(); return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1); } CK_CC_FORCE_INLINE static bool _ck_ring_dequeue_mc(struct ck_ring *ring, const void *buffer, void *data, unsigned int ts) { const unsigned int mask = ring->mask; unsigned int consumer, producer; consumer = ck_pr_load_uint(&ring->c_head); do { const char *target; /* * Producer counter must represent state relative to * our latest consumer snapshot. */ ck_pr_fence_load(); producer = ck_pr_load_uint(&ring->p_tail); if (CK_CC_UNLIKELY(consumer == producer)) return false; ck_pr_fence_load(); target = (const char *)buffer + ts * (consumer & mask); memcpy(data, target, ts); /* Serialize load with respect to head update. */ ck_pr_fence_store_atomic(); } while (ck_pr_cas_uint_value(&ring->c_head, consumer, consumer + 1, &consumer) == false); return true; } /* * The ck_ring_*_spsc namespace is the public interface for interacting with a * ring buffer containing pointers. Correctness is only provided if there is up * to one concurrent consumer and up to one concurrent producer. */ CK_CC_INLINE static bool ck_ring_enqueue_spsc_size(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry, unsigned int *size) { return _ck_ring_enqueue_sp_size(ring, buffer, &entry, sizeof(entry), size); } CK_CC_INLINE static bool ck_ring_enqueue_spsc(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry) { return _ck_ring_enqueue_sp(ring, buffer, &entry, sizeof(entry), NULL); } CK_CC_INLINE static bool ck_ring_dequeue_spsc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_dequeue_sc(ring, buffer, (void **)data, sizeof(void *)); } /* * The ck_ring_*_mpmc namespace is the public interface for interacting with a * ring buffer containing pointers. Correctness is provided for any number of * producers and consumers. 
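Before the mpmc variants, a sketch of the spsc calls above, which are the simplest way in: one producer thread, one consumer thread, no locks. The setup also shows the power-of-two capacity requirement implied by mask = size - 1; one slot is always left unused so that full and empty remain distinguishable. All names below are illustrative and not part of the commit.

#include <ck_ring.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define RING_SLOTS 1024			/* must be a power of two */

static struct ck_ring ring;
static struct ck_ring_buffer *slots;	/* one slot per ring entry */

static bool
ring_setup(void)
{
	slots = malloc(sizeof(*slots) * RING_SLOTS);
	if (slots == NULL)
		return false;
	/* Holds at most RING_SLOTS - 1 entries at any instant. */
	ck_ring_init(&ring, RING_SLOTS);
	return true;
}

/* Producer thread: returns false when the ring is full. */
static bool
spsc_produce(void *msg)
{
	return ck_ring_enqueue_spsc(&ring, slots, msg);
}

/* Consumer thread: returns NULL when the ring is empty. */
static void *
spsc_consume(void)
{
	void *msg;

	if (ck_ring_dequeue_spsc(&ring, slots, &msg) == false)
		return NULL;
	return msg;
}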
*/ CK_CC_INLINE static bool ck_ring_enqueue_mpmc(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry) { return _ck_ring_enqueue_mp(ring, buffer, &entry, sizeof(entry), NULL); } CK_CC_INLINE static bool ck_ring_enqueue_mpmc_size(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry, unsigned int *size) { return _ck_ring_enqueue_mp_size(ring, buffer, &entry, sizeof(entry), size); } CK_CC_INLINE static bool ck_ring_trydequeue_mpmc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_trydequeue_mc(ring, buffer, (void **)data, sizeof(void *)); } CK_CC_INLINE static bool ck_ring_dequeue_mpmc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_dequeue_mc(ring, buffer, (void **)data, sizeof(void *)); } /* * The ck_ring_*_spmc namespace is the public interface for interacting with a * ring buffer containing pointers. Correctness is provided for any number of * consumers with up to one concurrent producer. */ CK_CC_INLINE static bool ck_ring_enqueue_spmc_size(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry, unsigned int *size) { return _ck_ring_enqueue_sp_size(ring, buffer, &entry, sizeof(entry), size); } CK_CC_INLINE static bool ck_ring_enqueue_spmc(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry) { return _ck_ring_enqueue_sp(ring, buffer, &entry, sizeof(entry), NULL); } CK_CC_INLINE static bool ck_ring_trydequeue_spmc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_trydequeue_mc(ring, buffer, (void **)data, sizeof(void *)); } CK_CC_INLINE static bool ck_ring_dequeue_spmc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_dequeue_mc(ring, buffer, (void **)data, sizeof(void *)); } /* * The ck_ring_*_mpsc namespace is the public interface for interacting with a * ring buffer containing pointers. Correctness is provided for any number of * producers with up to one concurrent consumers. */ CK_CC_INLINE static bool ck_ring_enqueue_mpsc(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry) { return _ck_ring_enqueue_mp(ring, buffer, &entry, sizeof(entry), NULL); } CK_CC_INLINE static bool ck_ring_enqueue_mpsc_size(struct ck_ring *ring, struct ck_ring_buffer *buffer, const void *entry, unsigned int *size) { return _ck_ring_enqueue_mp_size(ring, buffer, &entry, sizeof(entry), size); } CK_CC_INLINE static bool ck_ring_dequeue_mpsc(struct ck_ring *ring, const struct ck_ring_buffer *buffer, void *data) { return _ck_ring_dequeue_sc(ring, buffer, (void **)data, sizeof(void *)); } /* * CK_RING_PROTOTYPE is used to define a type-safe interface for inlining * values of a particular type in the ring the buffer. 
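One more note before the typed-ring macros: the mpmc and mpsc enqueue wrappers above all funnel into the reworked _ck_ring_enqueue_mp, whose fast path now tests fullness with (producer - consumer) < mask. The test relies on unsigned wraparound: the subtraction is taken modulo 2^32, and since the true occupancy is bounded by the ring size the result stays exact even while the counters wrap past UINT_MAX. A small, standalone illustration with arbitrarily chosen values:

#include <assert.h>
#include <limits.h>

static void
occupancy_wrap_demo(void)
{
	const unsigned int mask = 1024 - 1;
	unsigned int consumer = UINT_MAX - 2;	/* about to wrap */
	unsigned int producer = consumer + 5;	/* already wrapped past zero */

	/*
	 * The subtraction is performed modulo UINT_MAX + 1, so the true
	 * occupancy (5) is recovered and the "not full" fast path is taken.
	 */
	assert(producer - consumer == 5);
	assert((producer - consumer) < mask);
}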
*/ #define CK_RING_PROTOTYPE(name, type) \ CK_CC_INLINE static bool \ ck_ring_enqueue_spsc_size_##name(struct ck_ring *a, \ struct type *b, \ struct type *c, \ unsigned int *d) \ { \ \ return _ck_ring_enqueue_sp_size(a, b, c, \ sizeof(struct type), d); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_spsc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_enqueue_sp(a, b, c, \ sizeof(struct type), NULL); \ } \ \ CK_CC_INLINE static bool \ ck_ring_dequeue_spsc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_dequeue_sc(a, b, c, \ sizeof(struct type)); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_spmc_size_##name(struct ck_ring *a, \ struct type *b, \ struct type *c, \ unsigned int *d) \ { \ \ return _ck_ring_enqueue_sp_size(a, b, c, \ sizeof(struct type), d); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_spmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_enqueue_sp(a, b, c, \ sizeof(struct type), NULL); \ } \ \ CK_CC_INLINE static bool \ ck_ring_trydequeue_spmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_trydequeue_mc(a, \ b, c, sizeof(struct type)); \ } \ \ CK_CC_INLINE static bool \ ck_ring_dequeue_spmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_dequeue_mc(a, b, c, \ sizeof(struct type)); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_mpsc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_enqueue_mp(a, b, c, \ sizeof(struct type), NULL); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_mpsc_size_##name(struct ck_ring *a, \ struct type *b, \ struct type *c, \ unsigned int *d) \ { \ \ return _ck_ring_enqueue_mp_size(a, b, c, \ sizeof(struct type), d); \ } \ \ CK_CC_INLINE static bool \ ck_ring_dequeue_mpsc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_dequeue_sc(a, b, c, \ sizeof(struct type)); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_mpmc_size_##name(struct ck_ring *a, \ struct type *b, \ struct type *c, \ unsigned int *d) \ { \ \ return _ck_ring_enqueue_mp_size(a, b, c, \ sizeof(struct type), d); \ } \ \ CK_CC_INLINE static bool \ ck_ring_enqueue_mpmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_enqueue_mp(a, b, c, \ sizeof(struct type), NULL); \ } \ \ CK_CC_INLINE static bool \ ck_ring_trydequeue_mpmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_trydequeue_mc(a, \ b, c, sizeof(struct type)); \ } \ \ CK_CC_INLINE static bool \ ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \ struct type *b, \ struct type *c) \ { \ \ return _ck_ring_dequeue_mc(a, b, c, \ sizeof(struct type)); \ } /* * A single producer with one concurrent consumer. */ #define CK_RING_ENQUEUE_SPSC(name, a, b, c) \ ck_ring_enqueue_spsc_##name(a, b, c) #define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d) \ ck_ring_enqueue_spsc_size_##name(a, b, c, d) #define CK_RING_DEQUEUE_SPSC(name, a, b, c) \ ck_ring_dequeue_spsc_##name(a, b, c) /* * A single producer with any number of concurrent consumers. 
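To make the generated names concrete, here is a hedged sketch of a typed ring built with CK_RING_PROTOTYPE, which copies whole structures in and out of the slot array instead of bare pointers. struct msg and the helpers are invented for illustration, and the slot array must hold a power-of-two number of entries matching the count passed to ck_ring_init.

#include <ck_ring.h>
#include <stdbool.h>
#include <stdint.h>

struct msg {
	uint64_t id;
	uint32_t kind;
	uint32_t len;
};

/* Generates ck_ring_enqueue_spsc_msg(), ck_ring_dequeue_spsc_msg(), ... */
CK_RING_PROTOTYPE(msg, msg)

static bool
msg_send(struct ck_ring *ring, struct msg *slots, struct msg *m)
{
	/* Copies sizeof(struct msg) bytes into the next free slot. */
	return ck_ring_enqueue_spsc_msg(ring, slots, m);
}

static bool
msg_recv(struct ck_ring *ring, struct msg *slots, struct msg *out)
{
	return ck_ring_dequeue_spsc_msg(ring, slots, out);
}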
*/ #define CK_RING_ENQUEUE_SPMC(name, a, b, c) \ ck_ring_enqueue_spmc_##name(a, b, c) #define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d) \ ck_ring_enqueue_spmc_size_##name(a, b, c, d) #define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c) \ ck_ring_trydequeue_spmc_##name(a, b, c) #define CK_RING_DEQUEUE_SPMC(name, a, b, c) \ ck_ring_dequeue_spmc_##name(a, b, c) /* * Any number of concurrent producers with up to one * concurrent consumer. */ #define CK_RING_ENQUEUE_MPSC(name, a, b, c) \ ck_ring_enqueue_mpsc_##name(a, b, c) #define CK_RING_ENQUEUE_MPSC_SIZE(name, a, b, c, d) \ ck_ring_enqueue_mpsc_size_##name(a, b, c, d) #define CK_RING_DEQUEUE_MPSC(name, a, b, c) \ ck_ring_dequeue_mpsc_##name(a, b, c) /* * Any number of concurrent producers and consumers. */ #define CK_RING_ENQUEUE_MPMC(name, a, b, c) \ ck_ring_enqueue_mpmc_##name(a, b, c) #define CK_RING_ENQUEUE_MPMC_SIZE(name, a, b, c, d) \ ck_ring_enqueue_mpmc_size_##name(a, b, c, d) #define CK_RING_TRYDEQUEUE_MPMC(name, a, b, c) \ ck_ring_trydequeue_mpmc_##name(a, b, c) #define CK_RING_DEQUEUE_MPMC(name, a, b, c) \ ck_ring_dequeue_mpmc_##name(a, b, c) #endif /* CK_RING_H */ Index: head/sys/contrib/ck/include/gcc/ck_cc.h =================================================================== --- head/sys/contrib/ck/include/gcc/ck_cc.h (revision 331897) +++ head/sys/contrib/ck/include/gcc/ck_cc.h (revision 331898) @@ -1,142 +1,141 @@ /* * Copyright 2009-2015 Samy Al Bahra. * Copyright 2014 Paul Khuong. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_GCC_CC_H #define CK_GCC_CC_H #include #ifdef __SUNPRO_C #define CK_CC_UNUSED #define CK_CC_USED #define CK_CC_IMM #define CK_CC_IMM_U32 #else #define CK_CC_UNUSED __attribute__((unused)) #define CK_CC_USED __attribute__((used)) #define CK_CC_IMM "i" #if defined(__x86_64__) || defined(__x86__) #define CK_CC_IMM_U32 "Z" #define CK_CC_IMM_S32 "e" #else #define CK_CC_IMM_U32 CK_CC_IMM #define CK_CC_IMM_S32 CK_CC_IMM #endif /* __x86_64__ || __x86__ */ #endif #ifdef __OPTIMIZE__ #define CK_CC_INLINE CK_CC_UNUSED inline #else #define CK_CC_INLINE CK_CC_UNUSED #endif #define CK_CC_FORCE_INLINE CK_CC_UNUSED __attribute__((always_inline)) inline #define CK_CC_RESTRICT __restrict__ /* * Packed attribute. */ #define CK_CC_PACKED __attribute__((packed)) /* * Weak reference. 
*/ #define CK_CC_WEAKREF __attribute__((weakref)) /* * Alignment attribute. */ #define CK_CC_ALIGN(B) __attribute__((aligned(B))) /* * Cache align. */ #define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE) /* * These are functions which should be avoided. */ #ifdef __freestanding__ #pragma GCC poison malloc free #endif /* * Branch execution hints. */ #define CK_CC_LIKELY(x) (__builtin_expect(!!(x), 1)) #define CK_CC_UNLIKELY(x) (__builtin_expect(!!(x), 0)) /* * Some compilers are overly strict regarding aliasing semantics. * Unfortunately, in many cases it makes more sense to pay aliasing * cost rather than overly expensive register spillage. */ #define CK_CC_ALIASED __attribute__((__may_alias__)) /* * Compile-time typeof */ #define CK_CC_TYPEOF(X, DEFAULT) __typeof__(X) /* - * Portability wrappers for bitwise ops. + * Portability wrappers for bitwise operations. */ - +#ifndef CK_MD_CC_BUILTIN_DISABLE #define CK_F_CC_FFS -#define CK_F_CC_CLZ -#define CK_F_CC_CTZ -#define CK_F_CC_POPCOUNT - CK_CC_INLINE static int ck_cc_ffs(unsigned int x) { - return __builtin_ffs(x); + return __builtin_ffsl(x); } +#define CK_F_CC_FFSL CK_CC_INLINE static int -ck_cc_clz(unsigned int x) +ck_cc_ffsl(unsigned long x) { - return __builtin_clz(x); + return __builtin_ffsll(x); } +#define CK_F_CC_CTZ CK_CC_INLINE static int ck_cc_ctz(unsigned int x) { return __builtin_ctz(x); } +#define CK_F_CC_POPCOUNT CK_CC_INLINE static int ck_cc_popcount(unsigned int x) { return __builtin_popcount(x); } - +#endif /* CK_MD_CC_BUILTIN_DISABLE */ #endif /* CK_GCC_CC_H */ Index: head/sys/contrib/ck/include/gcc/ck_pr.h =================================================================== --- head/sys/contrib/ck/include/gcc/ck_pr.h (revision 331897) +++ head/sys/contrib/ck/include/gcc/ck_pr.h (revision 331898) @@ -1,297 +1,297 @@ /* * Copyright 2010 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_PR_GCC_H #define CK_PR_GCC_H #ifndef CK_PR_H #error Do not include this file directly, use ck_pr.h #endif #include CK_CC_INLINE static void ck_pr_barrier(void) { __asm__ __volatile__("" ::: "memory"); return; } #ifndef CK_F_PR #define CK_F_PR #include #include /* * The following represent supported atomic operations. * These operations may be emulated. 
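For the bitwise wrappers above, the contract is the same whether the compiler builtins or the generic fallback loop in ck_cc.h is selected (CK_MD_CC_BUILTIN_DISABLE picks the latter): the return value is the 1-based index of the least significant set bit, and 0 for a zero argument. A small illustrative check; the LP64 assumption in the last line is mine.

#include <ck_cc.h>
#include <assert.h>

static void
ffs_demo(void)
{
	assert(ck_cc_ffs(0) == 0);	/* no bit set */
	assert(ck_cc_ffs(0x1) == 1);	/* bit 0 -> index 1 */
	assert(ck_cc_ffs(0x80) == 8);	/* bit 7 -> index 8 */
	/* Long variant; 1UL << 40 assumes an LP64 target. */
	assert(ck_cc_ffsl(1UL << 40) == 41);
}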
*/ #include "ck_f_pr.h" #define CK_PR_ACCESS(x) (*(volatile __typeof__(x) *)&(x)) #define CK_PR_LOAD(S, M, T) \ CK_CC_INLINE static T \ ck_pr_md_load_##S(const M *target) \ { \ T r; \ ck_pr_barrier(); \ r = CK_PR_ACCESS(*(const T *)target); \ ck_pr_barrier(); \ return (r); \ } \ CK_CC_INLINE static void \ ck_pr_md_store_##S(M *target, T v) \ { \ ck_pr_barrier(); \ CK_PR_ACCESS(*(T *)target) = v; \ ck_pr_barrier(); \ return; \ } CK_CC_INLINE static void * ck_pr_md_load_ptr(const void *target) { void *r; ck_pr_barrier(); - r = CK_CC_DECONST_PTR(CK_PR_ACCESS(target)); + r = CK_CC_DECONST_PTR(*(volatile void *const*)(target)); ck_pr_barrier(); return r; } CK_CC_INLINE static void ck_pr_md_store_ptr(void *target, const void *v) { ck_pr_barrier(); - CK_PR_ACCESS(target) = CK_CC_DECONST_PTR(v); + *(volatile void **)target = CK_CC_DECONST_PTR(v); ck_pr_barrier(); return; } #define CK_PR_LOAD_S(S, T) CK_PR_LOAD(S, T, T) CK_PR_LOAD_S(char, char) CK_PR_LOAD_S(uint, unsigned int) CK_PR_LOAD_S(int, int) #ifndef CK_PR_DISABLE_DOUBLE CK_PR_LOAD_S(double, double) #endif CK_PR_LOAD_S(64, uint64_t) CK_PR_LOAD_S(32, uint32_t) CK_PR_LOAD_S(16, uint16_t) CK_PR_LOAD_S(8, uint8_t) #undef CK_PR_LOAD_S #undef CK_PR_LOAD CK_CC_INLINE static void ck_pr_stall(void) { ck_pr_barrier(); } /* * Load and store fences are equivalent to full fences in the GCC port. */ #define CK_PR_FENCE(T) \ CK_CC_INLINE static void \ ck_pr_fence_strict_##T(void) \ { \ __sync_synchronize(); \ } CK_PR_FENCE(atomic) CK_PR_FENCE(atomic_atomic) CK_PR_FENCE(atomic_load) CK_PR_FENCE(atomic_store) CK_PR_FENCE(store_atomic) CK_PR_FENCE(load_atomic) CK_PR_FENCE(load) CK_PR_FENCE(load_load) CK_PR_FENCE(load_store) CK_PR_FENCE(store) CK_PR_FENCE(store_store) CK_PR_FENCE(store_load) CK_PR_FENCE(memory) CK_PR_FENCE(acquire) CK_PR_FENCE(release) CK_PR_FENCE(acqrel) CK_PR_FENCE(lock) CK_PR_FENCE(unlock) #undef CK_PR_FENCE /* * Atomic compare and swap. */ #define CK_PR_CAS(S, M, T) \ CK_CC_INLINE static bool \ ck_pr_cas_##S(M *target, T compare, T set) \ { \ bool z; \ z = __sync_bool_compare_and_swap((T *)target, compare, set); \ return z; \ } CK_PR_CAS(ptr, void, void *) #define CK_PR_CAS_S(S, T) CK_PR_CAS(S, T, T) CK_PR_CAS_S(char, char) CK_PR_CAS_S(int, int) CK_PR_CAS_S(uint, unsigned int) CK_PR_CAS_S(64, uint64_t) CK_PR_CAS_S(32, uint32_t) CK_PR_CAS_S(16, uint16_t) CK_PR_CAS_S(8, uint8_t) #undef CK_PR_CAS_S #undef CK_PR_CAS /* * Compare and swap, set *v to old value of target. */ CK_CC_INLINE static bool ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v) { set = __sync_val_compare_and_swap((void **)target, compare, set); *(void **)v = set; return (set == compare); } #define CK_PR_CAS_O(S, T) \ CK_CC_INLINE static bool \ ck_pr_cas_##S##_value(T *target, T compare, T set, T *v) \ { \ set = __sync_val_compare_and_swap(target, compare, set);\ *v = set; \ return (set == compare); \ } CK_PR_CAS_O(char, char) CK_PR_CAS_O(int, int) CK_PR_CAS_O(uint, unsigned int) CK_PR_CAS_O(64, uint64_t) CK_PR_CAS_O(32, uint32_t) CK_PR_CAS_O(16, uint16_t) CK_PR_CAS_O(8, uint8_t) #undef CK_PR_CAS_O /* * Atomic fetch-and-add operations. 
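Before the fetch-and-add wrappers, a note on the compare-and-swap _value variants above: on failure they hand back the value actually observed, so a retry loop avoids an extra load of the target. A hedged sketch of a bounded increment built on top of them; the helper is illustrative and not part of CK.

#include <ck_pr.h>
#include <stdbool.h>

static bool
bounded_inc(unsigned int *counter, unsigned int limit)
{
	unsigned int snap = ck_pr_load_uint(counter);

	for (;;) {
		if (snap >= limit)
			return false;
		/* On failure, snap is refreshed with the winning value. */
		if (ck_pr_cas_uint_value(counter, snap, snap + 1, &snap))
			return true;
	}
}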
*/ #define CK_PR_FAA(S, M, T) \ CK_CC_INLINE static T \ ck_pr_faa_##S(M *target, T d) \ { \ d = __sync_fetch_and_add((T *)target, d); \ return (d); \ } CK_PR_FAA(ptr, void, void *) #define CK_PR_FAA_S(S, T) CK_PR_FAA(S, T, T) CK_PR_FAA_S(char, char) CK_PR_FAA_S(uint, unsigned int) CK_PR_FAA_S(int, int) CK_PR_FAA_S(64, uint64_t) CK_PR_FAA_S(32, uint32_t) CK_PR_FAA_S(16, uint16_t) CK_PR_FAA_S(8, uint8_t) #undef CK_PR_FAA_S #undef CK_PR_FAA /* * Atomic store-only binary operations. */ #define CK_PR_BINARY(K, S, M, T) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target, T d) \ { \ d = __sync_fetch_and_##K((T *)target, d); \ return; \ } #define CK_PR_BINARY_S(K, S, T) CK_PR_BINARY(K, S, T, T) #define CK_PR_GENERATE(K) \ CK_PR_BINARY(K, ptr, void, void *) \ CK_PR_BINARY_S(K, char, char) \ CK_PR_BINARY_S(K, int, int) \ CK_PR_BINARY_S(K, uint, unsigned int) \ CK_PR_BINARY_S(K, 64, uint64_t) \ CK_PR_BINARY_S(K, 32, uint32_t) \ CK_PR_BINARY_S(K, 16, uint16_t) \ CK_PR_BINARY_S(K, 8, uint8_t) CK_PR_GENERATE(add) CK_PR_GENERATE(sub) CK_PR_GENERATE(and) CK_PR_GENERATE(or) CK_PR_GENERATE(xor) #undef CK_PR_GENERATE #undef CK_PR_BINARY_S #undef CK_PR_BINARY #define CK_PR_UNARY(S, M, T) \ CK_CC_INLINE static void \ ck_pr_inc_##S(M *target) \ { \ ck_pr_add_##S(target, (T)1); \ return; \ } \ CK_CC_INLINE static void \ ck_pr_dec_##S(M *target) \ { \ ck_pr_sub_##S(target, (T)1); \ return; \ } #define CK_PR_UNARY_S(S, M) CK_PR_UNARY(S, M, M) CK_PR_UNARY(ptr, void, void *) CK_PR_UNARY_S(char, char) CK_PR_UNARY_S(int, int) CK_PR_UNARY_S(uint, unsigned int) CK_PR_UNARY_S(64, uint64_t) CK_PR_UNARY_S(32, uint32_t) CK_PR_UNARY_S(16, uint16_t) CK_PR_UNARY_S(8, uint8_t) #undef CK_PR_UNARY_S #undef CK_PR_UNARY #endif /* !CK_F_PR */ #endif /* CK_PR_GCC_H */ Index: head/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h =================================================================== --- head/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h (revision 331897) +++ head/sys/contrib/ck/include/gcc/sparcv9/ck_pr.h (revision 331898) @@ -1,228 +1,228 @@ /* * Copyright 2009, 2010 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
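The fetch-and-add and inc/dec wrappers in the gcc port above are the usual tools for cheap shared statistics; fetch-and-add additionally returns the previous value, which inc/dec do not. An illustrative sketch with invented counter names:

#include <ck_pr.h>

static unsigned int events_total;
static unsigned int events_in_flight;

static unsigned int
event_begin(void)
{
	ck_pr_inc_uint(&events_in_flight);
	/* Returns the pre-increment value, handy as a ticket or sequence. */
	return ck_pr_faa_uint(&events_total, 1);
}

static void
event_end(void)
{
	ck_pr_dec_uint(&events_in_flight);
}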
*/ #ifndef CK_PR_SPARCV9_H #define CK_PR_SPARCV9_H #ifndef CK_PR_H #error Do not include this file directly, use ck_pr.h #endif #include #include /* * The following represent supported atomic operations. * These operations may be emulated. */ #include "ck_f_pr.h" /* * Minimum interface requirement met. */ #define CK_F_PR /* * Order loads at the least. */ CK_CC_INLINE static void ck_pr_stall(void) { __asm__ __volatile__("membar #LoadLoad" ::: "memory"); return; } #define CK_PR_FENCE(T, I) \ CK_CC_INLINE static void \ ck_pr_fence_strict_##T(void) \ { \ __asm__ __volatile__(I ::: "memory"); \ } /* * Atomic operations are treated as both load and store * operations on SPARCv9. */ CK_PR_FENCE(atomic, "membar #StoreStore") CK_PR_FENCE(atomic_store, "membar #StoreStore") CK_PR_FENCE(atomic_load, "membar #StoreLoad") CK_PR_FENCE(store_atomic, "membar #StoreStore") CK_PR_FENCE(load_atomic, "membar #LoadStore") CK_PR_FENCE(store, "membar #StoreStore") CK_PR_FENCE(store_load, "membar #StoreLoad") CK_PR_FENCE(load, "membar #LoadLoad") CK_PR_FENCE(load_store, "membar #LoadStore") -CK_PR_FENCE(memory, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad") +CK_PR_FENCE(memory, "membar #MemIssue") CK_PR_FENCE(acquire, "membar #LoadLoad | #LoadStore") CK_PR_FENCE(release, "membar #LoadStore | #StoreStore") CK_PR_FENCE(acqrel, "membar #LoadLoad | #LoadStore | #StoreStore") CK_PR_FENCE(lock, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad") CK_PR_FENCE(unlock, "membar #LoadStore | #StoreStore") #undef CK_PR_FENCE #define CK_PR_LOAD(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_md_load_##S(const M *target) \ { \ T r; \ __asm__ __volatile__(I " [%1], %0" \ : "=&r" (r) \ : "r" (target) \ : "memory"); \ return (r); \ } CK_PR_LOAD(ptr, void, void *, uint64_t, "ldx") #define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I) CK_PR_LOAD_S(64, uint64_t, "ldx") CK_PR_LOAD_S(32, uint32_t, "lduw") CK_PR_LOAD_S(uint, unsigned int, "lduw") CK_PR_LOAD_S(double, double, "ldx") CK_PR_LOAD_S(int, int, "ldsw") #undef CK_PR_LOAD_S #undef CK_PR_LOAD #define CK_PR_STORE(S, M, T, C, I) \ CK_CC_INLINE static void \ ck_pr_md_store_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %0, [%1]" \ : \ : "r" (v), \ "r" (target) \ : "memory"); \ return; \ } CK_PR_STORE(ptr, void, const void *, uint64_t, "stx") #define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I) CK_PR_STORE_S(8, uint8_t, "stub") CK_PR_STORE_S(64, uint64_t, "stx") CK_PR_STORE_S(32, uint32_t, "stuw") CK_PR_STORE_S(uint, unsigned int, "stuw") CK_PR_STORE_S(double, double, "stx") CK_PR_STORE_S(int, int, "stsw") #undef CK_PR_STORE_S #undef CK_PR_STORE CK_CC_INLINE static bool ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value) { __asm__ __volatile__("casx [%1], %2, %0" : "+&r" (set) : "r" (target), "r" (compare) : "memory"); *value = set; return (compare == set); } CK_CC_INLINE static bool ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set) { __asm__ __volatile__("casx [%1], %2, %0" : "+&r" (set) : "r" (target), "r" (compare) : "memory"); return (compare == set); } CK_CC_INLINE static bool ck_pr_cas_ptr(void *target, void *compare, void *set) { return ck_pr_cas_64(target, (uint64_t)compare, (uint64_t)set); } CK_CC_INLINE static bool ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *previous) { return ck_pr_cas_64_value(target, (uint64_t)compare, (uint64_t)set, previous); } #define CK_PR_CAS(N, T) \ CK_CC_INLINE static bool \ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \ { \ __asm__ 
__volatile__("cas [%1], %2, %0" \ : "+&r" (set) \ : "r" (target), \ "r" (compare) \ : "memory"); \ *value = set; \ return (compare == set); \ } \ CK_CC_INLINE static bool \ ck_pr_cas_##N(T *target, T compare, T set) \ { \ __asm__ __volatile__("cas [%1], %2, %0" \ : "+&r" (set) \ : "r" (target), \ "r" (compare) \ : "memory"); \ return (compare == set); \ } CK_PR_CAS(32, uint32_t) CK_PR_CAS(uint, unsigned int) CK_PR_CAS(int, int) #undef CK_PR_CAS #define CK_PR_FAS(N, T) \ CK_CC_INLINE static T \ ck_pr_fas_##N(T *target, T update) \ { \ \ __asm__ __volatile__("swap [%1], %0" \ : "+&r" (update) \ : "r" (target) \ : "memory"); \ return (update); \ } CK_PR_FAS(int, int) CK_PR_FAS(uint, unsigned int) CK_PR_FAS(32, uint32_t) #undef CK_PR_FAS #endif /* CK_PR_SPARCV9_H */ Index: head/sys/contrib/ck/include/gcc/x86/ck_pr.h =================================================================== --- head/sys/contrib/ck/include/gcc/x86/ck_pr.h (revision 331897) +++ head/sys/contrib/ck/include/gcc/x86/ck_pr.h (revision 331898) @@ -1,390 +1,408 @@ /* * Copyright 2009-2015 Samy Al Bahra. * Copyright 2011 Devon H. O'Dell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_PR_X86_H #define CK_PR_X86_H #ifndef CK_PR_H #error Do not include this file directly, use ck_pr.h #endif #include #include #include /* * The following represent supported atomic operations. * These operations may be emulated. */ #include "ck_f_pr.h" /* Minimum requirements for the CK_PR interface are met. */ #define CK_F_PR -#ifdef CK_MD_UMP -#define CK_PR_LOCK_PREFIX -#else -#define CK_PR_LOCK_PREFIX "lock " -#endif - /* - * Prevent speculative execution in busy-wait loops (P4 <=) - * or "predefined delay". + * Prevent speculative execution in busy-wait loops (P4 <=) or "predefined + * delay". 
*/ CK_CC_INLINE static void ck_pr_stall(void) { __asm__ __volatile__("pause" ::: "memory"); return; } +#ifdef CK_MD_UMP +#define CK_PR_LOCK_PREFIX #define CK_PR_FENCE(T, I) \ CK_CC_INLINE static void \ ck_pr_fence_strict_##T(void) \ { \ + __asm__ __volatile__("" ::: "memory"); \ + return; \ + } +#else +#define CK_PR_LOCK_PREFIX "lock " +#define CK_PR_FENCE(T, I) \ + CK_CC_INLINE static void \ + ck_pr_fence_strict_##T(void) \ + { \ __asm__ __volatile__(I ::: "memory"); \ + return; \ } +#endif /* CK_MD_UMP */ -CK_PR_FENCE(atomic, "sfence") -CK_PR_FENCE(atomic_store, "sfence") -CK_PR_FENCE(atomic_load, "mfence") -CK_PR_FENCE(store_atomic, "sfence") -CK_PR_FENCE(load_atomic, "mfence") -CK_PR_FENCE(load, "lfence") -CK_PR_FENCE(load_store, "mfence") -CK_PR_FENCE(store, "sfence") -CK_PR_FENCE(store_load, "mfence") -CK_PR_FENCE(memory, "mfence") -CK_PR_FENCE(release, "mfence") -CK_PR_FENCE(acquire, "mfence") -CK_PR_FENCE(acqrel, "mfence") -CK_PR_FENCE(lock, "mfence") -CK_PR_FENCE(unlock, "mfence") +#if defined(CK_MD_SSE_DISABLE) +/* If SSE is disabled, then use atomic operations for serialization. */ +#define CK_MD_X86_MFENCE "lock addl $0, (%%esp)" +#define CK_MD_X86_SFENCE CK_MD_X86_MFENCE +#define CK_MD_X86_LFENCE CK_MD_X86_MFENCE +#else +#define CK_MD_X86_SFENCE "sfence" +#define CK_MD_X86_LFENCE "lfence" +#define CK_MD_X86_MFENCE "mfence" +#endif /* !CK_MD_SSE_DISABLE */ + +CK_PR_FENCE(atomic, "") +CK_PR_FENCE(atomic_store, "") +CK_PR_FENCE(atomic_load, "") +CK_PR_FENCE(store_atomic, "") +CK_PR_FENCE(load_atomic, "") +CK_PR_FENCE(load, CK_MD_X86_LFENCE) +CK_PR_FENCE(load_store, CK_MD_X86_MFENCE) +CK_PR_FENCE(store, CK_MD_X86_SFENCE) +CK_PR_FENCE(store_load, CK_MD_X86_MFENCE) +CK_PR_FENCE(memory, CK_MD_X86_MFENCE) +CK_PR_FENCE(release, CK_MD_X86_MFENCE) +CK_PR_FENCE(acquire, CK_MD_X86_MFENCE) +CK_PR_FENCE(acqrel, CK_MD_X86_MFENCE) +CK_PR_FENCE(lock, CK_MD_X86_MFENCE) +CK_PR_FENCE(unlock, CK_MD_X86_MFENCE) #undef CK_PR_FENCE /* * Atomic fetch-and-store operations. 
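A quick aside before the fetch-and-store wrappers: ck_pr_stall() above emits pause, which keeps a busy-wait loop from flooding the pipeline with speculative loads and is friendlier to a hyper-threaded sibling. A typical, illustrative spin-then-acquire pattern:

#include <ck_pr.h>

static void
wait_for_flag(unsigned int *flag)
{
	while (ck_pr_load_uint(flag) == 0)
		ck_pr_stall();
	/* Order the loads that follow after the flag observation. */
	ck_pr_fence_acquire();
}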
*/ #define CK_PR_FAS(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_fas_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %0, %1" \ : "+m" (*(C *)target), \ "+q" (v) \ : \ : "memory"); \ return v; \ } CK_PR_FAS(ptr, void, void *, char, "xchgl") #define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I) CK_PR_FAS_S(char, char, "xchgb") CK_PR_FAS_S(uint, unsigned int, "xchgl") CK_PR_FAS_S(int, int, "xchgl") CK_PR_FAS_S(32, uint32_t, "xchgl") CK_PR_FAS_S(16, uint16_t, "xchgw") CK_PR_FAS_S(8, uint8_t, "xchgb") #undef CK_PR_FAS_S #undef CK_PR_FAS #define CK_PR_LOAD(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_md_load_##S(const M *target) \ { \ T r; \ __asm__ __volatile__(I " %1, %0" \ : "=q" (r) \ : "m" (*(const C *)target) \ : "memory"); \ return (r); \ } CK_PR_LOAD(ptr, void, void *, char, "movl") #define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I) CK_PR_LOAD_S(char, char, "movb") CK_PR_LOAD_S(uint, unsigned int, "movl") CK_PR_LOAD_S(int, int, "movl") CK_PR_LOAD_S(32, uint32_t, "movl") CK_PR_LOAD_S(16, uint16_t, "movw") CK_PR_LOAD_S(8, uint8_t, "movb") #undef CK_PR_LOAD_S #undef CK_PR_LOAD #define CK_PR_STORE(S, M, T, C, I) \ CK_CC_INLINE static void \ ck_pr_md_store_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %1, %0" \ : "=m" (*(C *)target) \ : CK_CC_IMM "q" (v) \ : "memory"); \ return; \ } CK_PR_STORE(ptr, void, const void *, char, "movl") #define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I) CK_PR_STORE_S(char, char, "movb") CK_PR_STORE_S(uint, unsigned int, "movl") CK_PR_STORE_S(int, int, "movl") CK_PR_STORE_S(32, uint32_t, "movl") CK_PR_STORE_S(16, uint16_t, "movw") CK_PR_STORE_S(8, uint8_t, "movb") #undef CK_PR_STORE_S #undef CK_PR_STORE /* * Atomic fetch-and-add operations. */ #define CK_PR_FAA(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_faa_##S(M *target, T d) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \ : "+m" (*(C *)target), \ "+q" (d) \ : \ : "memory", "cc"); \ return (d); \ } CK_PR_FAA(ptr, void, uintptr_t, char, "xaddl") #define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I) CK_PR_FAA_S(char, char, "xaddb") CK_PR_FAA_S(uint, unsigned int, "xaddl") CK_PR_FAA_S(int, int, "xaddl") CK_PR_FAA_S(32, uint32_t, "xaddl") CK_PR_FAA_S(16, uint16_t, "xaddw") CK_PR_FAA_S(8, uint8_t, "xaddb") #undef CK_PR_FAA_S #undef CK_PR_FAA /* * Atomic store-only unary operations. */ #define CK_PR_UNARY(K, S, T, C, I) \ CK_PR_UNARY_R(K, S, T, C, I) \ CK_PR_UNARY_V(K, S, T, C, I) #define CK_PR_UNARY_R(K, S, T, C, I) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(T *target) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \ : "+m" (*(C *)target) \ : \ : "memory", "cc"); \ return; \ } #define CK_PR_UNARY_V(K, S, T, C, I) \ CK_CC_INLINE static void \ ck_pr_##K##_##S##_zero(T *target, bool *r) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \ : "+m" (*(C *)target), \ "=m" (*r) \ : \ : "memory", "cc"); \ return; \ } #define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I) #define CK_PR_GENERATE(K) \ CK_PR_UNARY(K, ptr, void, char, #K "l") \ CK_PR_UNARY_S(K, char, char, #K "b") \ CK_PR_UNARY_S(K, int, int, #K "l") \ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \ CK_PR_UNARY_S(K, 8, uint8_t, #K "b") CK_PR_GENERATE(inc) CK_PR_GENERATE(dec) CK_PR_GENERATE(neg) /* not does not affect condition flags. 
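The _zero variants generated above report whether the operation drove the target to zero, which is exactly the shape of a reference-count release. A hedged sketch follows; the object type and the fence placement reflect the common pattern, not code from this commit.

#include <ck_pr.h>
#include <stdbool.h>

struct object {
	unsigned int refcnt;
	/* ... payload ... */
};

static void
object_release(struct object *o, void (*dtor)(struct object *))
{
	bool zero;

	/* Make prior writes visible before dropping the reference. */
	ck_pr_fence_release();
	ck_pr_dec_uint_zero(&o->refcnt, &zero);
	if (zero) {
		/* Pair with the release above before tearing down. */
		ck_pr_fence_acquire();
		dtor(o);
	}
}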
*/ #undef CK_PR_UNARY_V #define CK_PR_UNARY_V(a, b, c, d, e) CK_PR_GENERATE(not) #undef CK_PR_GENERATE #undef CK_PR_UNARY_S #undef CK_PR_UNARY_V #undef CK_PR_UNARY_R #undef CK_PR_UNARY /* * Atomic store-only binary operations. */ #define CK_PR_BINARY(K, S, M, T, C, I) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target, T d) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \ : "+m" (*(C *)target) \ : CK_CC_IMM "q" (d) \ : "memory", "cc"); \ return; \ } #define CK_PR_BINARY_S(K, S, T, I) CK_PR_BINARY(K, S, T, T, T, I) #define CK_PR_GENERATE(K) \ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "l") \ CK_PR_BINARY_S(K, char, char, #K "b") \ CK_PR_BINARY_S(K, int, int, #K "l") \ CK_PR_BINARY_S(K, uint, unsigned int, #K "l") \ CK_PR_BINARY_S(K, 32, uint32_t, #K "l") \ CK_PR_BINARY_S(K, 16, uint16_t, #K "w") \ CK_PR_BINARY_S(K, 8, uint8_t, #K "b") CK_PR_GENERATE(add) CK_PR_GENERATE(sub) CK_PR_GENERATE(and) CK_PR_GENERATE(or) CK_PR_GENERATE(xor) #undef CK_PR_GENERATE #undef CK_PR_BINARY_S #undef CK_PR_BINARY /* * Atomic compare and swap. */ #define CK_PR_CAS(S, M, T, C, I) \ CK_CC_INLINE static bool \ ck_pr_cas_##S(M *target, T compare, T set) \ { \ bool z; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \ : "+m" (*(C *)target), \ "=a" (z) \ : "q" (set), \ "a" (compare) \ : "memory", "cc"); \ return z; \ } CK_PR_CAS(ptr, void, void *, char, "cmpxchgl") #define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I) CK_PR_CAS_S(char, char, "cmpxchgb") CK_PR_CAS_S(int, int, "cmpxchgl") CK_PR_CAS_S(uint, unsigned int, "cmpxchgl") CK_PR_CAS_S(32, uint32_t, "cmpxchgl") CK_PR_CAS_S(16, uint16_t, "cmpxchgw") CK_PR_CAS_S(8, uint8_t, "cmpxchgb") #undef CK_PR_CAS_S #undef CK_PR_CAS /* * Compare and swap, set *v to old value of target. */ #define CK_PR_CAS_O(S, M, T, C, I, R) \ CK_CC_INLINE static bool \ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \ { \ bool z; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \ "mov %% " R ", %2;" \ "setz %1;" \ : "+m" (*(C *)target), \ "=a" (z), \ "=m" (*(C *)v) \ : "q" (set), \ "a" (compare) \ : "memory", "cc"); \ return (bool)z; \ } CK_PR_CAS_O(ptr, void, void *, char, "l", "eax") #define CK_PR_CAS_O_S(S, T, I, R) \ CK_PR_CAS_O(S, T, T, T, I, R) CK_PR_CAS_O_S(char, char, "b", "al") CK_PR_CAS_O_S(int, int, "l", "eax") CK_PR_CAS_O_S(uint, unsigned int, "l", "eax") CK_PR_CAS_O_S(32, uint32_t, "l", "eax") CK_PR_CAS_O_S(16, uint16_t, "w", "ax") CK_PR_CAS_O_S(8, uint8_t, "b", "al") #undef CK_PR_CAS_O_S #undef CK_PR_CAS_O /* * Atomic bit test operations. 
*/ #define CK_PR_BT(K, S, T, P, C, I) \ CK_CC_INLINE static bool \ ck_pr_##K##_##S(T *target, unsigned int b) \ { \ bool c; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \ : "+m" (*(C *)target), \ "=q" (c) \ : "q" ((P)b) \ : "memory", "cc"); \ return (bool)c; \ } #define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I) #define CK_PR_GENERATE(K) \ CK_PR_BT(K, ptr, void, uint32_t, char, #K "l %2, %0") \ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \ CK_PR_BT_S(K, int, int, #K "l %2, %0") \ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0") CK_PR_GENERATE(btc) CK_PR_GENERATE(bts) CK_PR_GENERATE(btr) #undef CK_PR_GENERATE #undef CK_PR_BT #endif /* CK_PR_X86_H */ Index: head/sys/contrib/ck/include/gcc/x86_64/ck_pr.h =================================================================== --- head/sys/contrib/ck/include/gcc/x86_64/ck_pr.h (revision 331897) +++ head/sys/contrib/ck/include/gcc/x86_64/ck_pr.h (revision 331898) @@ -1,585 +1,606 @@ /* * Copyright 2009-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_PR_X86_64_H #define CK_PR_X86_64_H #ifndef CK_PR_H #error Do not include this file directly, use ck_pr.h #endif #include #include #include /* * The following represent supported atomic operations. * These operations may be emulated. */ #include "ck_f_pr.h" /* * Support for TSX extensions. */ #ifdef CK_MD_RTM_ENABLE #include "ck_pr_rtm.h" #endif /* Minimum requirements for the CK_PR interface are met. */ #define CK_F_PR #ifdef CK_MD_UMP #define CK_PR_LOCK_PREFIX #else #define CK_PR_LOCK_PREFIX "lock " #endif /* - * Prevent speculative execution in busy-wait loops (P4 <=) - * or "predefined delay". + * Prevent speculative execution in busy-wait loops (P4 <=) or "predefined + * delay". */ CK_CC_INLINE static void ck_pr_stall(void) { __asm__ __volatile__("pause" ::: "memory"); return; } #define CK_PR_FENCE(T, I) \ CK_CC_INLINE static void \ ck_pr_fence_strict_##T(void) \ { \ __asm__ __volatile__(I ::: "memory"); \ } -CK_PR_FENCE(atomic, "sfence") -CK_PR_FENCE(atomic_store, "sfence") -CK_PR_FENCE(atomic_load, "mfence") -CK_PR_FENCE(store_atomic, "sfence") -CK_PR_FENCE(load_atomic, "mfence") +/* Atomic operations are always serializing. 
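Stepping back to the bit test-and-set/reset/complement wrappers generated just above for the 32-bit port: the return value is the bit's previous state, so a false return from bts means the caller now owns the bit. A hedged sketch of a tiny slot bitmap; the names are invented.

#include <ck_pr.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t slot_map;	/* bit set == slot taken */

/* Returns true if slot b (0..31) was free and is now owned by the caller. */
static bool
slot_claim(unsigned int b)
{
	return ck_pr_bts_32(&slot_map, b) == false;
}

static void
slot_release(unsigned int b)
{
	(void)ck_pr_btr_32(&slot_map, b);
}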
*/ +CK_PR_FENCE(atomic, "") +CK_PR_FENCE(atomic_store, "") +CK_PR_FENCE(atomic_load, "") +CK_PR_FENCE(store_atomic, "") +CK_PR_FENCE(load_atomic, "") + +/* Traditional fence interface. */ CK_PR_FENCE(load, "lfence") CK_PR_FENCE(load_store, "mfence") CK_PR_FENCE(store, "sfence") CK_PR_FENCE(store_load, "mfence") CK_PR_FENCE(memory, "mfence") + +/* Below are stdatomic-style fences. */ + +/* + * Provides load-store and store-store ordering. However, Intel specifies that + * the WC memory model is relaxed. It is likely an sfence *is* sufficient (in + * particular, stores are not re-ordered with respect to prior loads and it is + * really just the stores that are subject to re-ordering). However, we take + * the conservative route as the manuals are too ambiguous for my taste. + */ CK_PR_FENCE(release, "mfence") + +/* + * Provides load-load and load-store ordering. The lfence instruction ensures + * all prior load operations are complete before any subsequent instructions + * actually begin execution. However, the manual also ends up going to describe + * WC memory as a relaxed model. + */ CK_PR_FENCE(acquire, "mfence") + CK_PR_FENCE(acqrel, "mfence") CK_PR_FENCE(lock, "mfence") CK_PR_FENCE(unlock, "mfence") #undef CK_PR_FENCE /* * Read for ownership. Older compilers will generate the 32-bit * 3DNow! variant which is binary compatible with x86-64 variant * of prefetchw. */ #ifndef CK_F_PR_RFO #define CK_F_PR_RFO CK_CC_INLINE static void ck_pr_rfo(const void *m) { __asm__ __volatile__("prefetchw (%0)" : : "r" (m) : "memory"); return; } #endif /* CK_F_PR_RFO */ /* * Atomic fetch-and-store operations. */ #define CK_PR_FAS(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_fas_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %0, %1" \ : "+m" (*(C *)target), \ "+q" (v) \ : \ : "memory"); \ return v; \ } CK_PR_FAS(ptr, void, void *, char, "xchgq") #define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I) #ifndef CK_PR_DISABLE_DOUBLE CK_PR_FAS_S(double, double, "xchgq") #endif CK_PR_FAS_S(char, char, "xchgb") CK_PR_FAS_S(uint, unsigned int, "xchgl") CK_PR_FAS_S(int, int, "xchgl") CK_PR_FAS_S(64, uint64_t, "xchgq") CK_PR_FAS_S(32, uint32_t, "xchgl") CK_PR_FAS_S(16, uint16_t, "xchgw") CK_PR_FAS_S(8, uint8_t, "xchgb") #undef CK_PR_FAS_S #undef CK_PR_FAS /* * Atomic load-from-memory operations. 
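The acquire/release discussion above maps directly onto the usual publish/consume handshake: the writer orders its payload stores before the flag store, and the reader orders its payload loads after the flag load. An illustrative sketch with invented names; on this port both fences currently emit mfence, as the comments explain.

#include <ck_pr.h>
#include <stdbool.h>

static int payload;
static unsigned int ready;

static void
publish(int v)
{
	payload = v;
	ck_pr_fence_release();		/* payload visible before the flag */
	ck_pr_store_uint(&ready, 1);
}

static bool
try_read(int *out)
{
	if (ck_pr_load_uint(&ready) == 0)
		return false;
	ck_pr_fence_acquire();		/* flag observed before payload read */
	*out = payload;
	return true;
}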
*/ #define CK_PR_LOAD(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_md_load_##S(const M *target) \ { \ T r; \ __asm__ __volatile__(I " %1, %0" \ : "=q" (r) \ : "m" (*(const C *)target) \ : "memory"); \ return (r); \ } CK_PR_LOAD(ptr, void, void *, char, "movq") #define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I) CK_PR_LOAD_S(char, char, "movb") CK_PR_LOAD_S(uint, unsigned int, "movl") CK_PR_LOAD_S(int, int, "movl") #ifndef CK_PR_DISABLE_DOUBLE CK_PR_LOAD_S(double, double, "movq") #endif CK_PR_LOAD_S(64, uint64_t, "movq") CK_PR_LOAD_S(32, uint32_t, "movl") CK_PR_LOAD_S(16, uint16_t, "movw") CK_PR_LOAD_S(8, uint8_t, "movb") #undef CK_PR_LOAD_S #undef CK_PR_LOAD CK_CC_INLINE static void ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2]) { __asm__ __volatile__("movq %%rdx, %%rcx;" "movq %%rax, %%rbx;" CK_PR_LOCK_PREFIX "cmpxchg16b %2;" : "=a" (v[0]), "=d" (v[1]) : "m" (*(const uint64_t *)target) : "rbx", "rcx", "memory", "cc"); return; } CK_CC_INLINE static void ck_pr_load_ptr_2(const void *t, void *v) { ck_pr_load_64_2(CK_CPP_CAST(const uint64_t *, t), CK_CPP_CAST(uint64_t *, v)); return; } #define CK_PR_LOAD_2(S, W, T) \ CK_CC_INLINE static void \ ck_pr_md_load_##S##_##W(const T t[2], T v[2]) \ { \ ck_pr_load_64_2((const uint64_t *)(const void *)t, \ (uint64_t *)(void *)v); \ return; \ } CK_PR_LOAD_2(char, 16, char) CK_PR_LOAD_2(int, 4, int) CK_PR_LOAD_2(uint, 4, unsigned int) CK_PR_LOAD_2(32, 4, uint32_t) CK_PR_LOAD_2(16, 8, uint16_t) CK_PR_LOAD_2(8, 16, uint8_t) #undef CK_PR_LOAD_2 /* * Atomic store-to-memory operations. */ #define CK_PR_STORE_IMM(S, M, T, C, I, K) \ CK_CC_INLINE static void \ ck_pr_md_store_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %1, %0" \ : "=m" (*(C *)target) \ : K "q" (v) \ : "memory"); \ return; \ } #define CK_PR_STORE(S, M, T, C, I) \ CK_CC_INLINE static void \ ck_pr_md_store_##S(M *target, T v) \ { \ __asm__ __volatile__(I " %1, %0" \ : "=m" (*(C *)target) \ : "q" (v) \ : "memory"); \ return; \ } CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32) #ifndef CK_PR_DISABLE_DOUBLE CK_PR_STORE(double, double, double, double, "movq") #endif #define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K) CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32) CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32) CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32) CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32) CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32) CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32) CK_PR_STORE_S(8, uint8_t, "movb", CK_CC_IMM_U32) #undef CK_PR_STORE_S #undef CK_PR_STORE_IMM #undef CK_PR_STORE /* * Atomic fetch-and-add operations. */ #define CK_PR_FAA(S, M, T, C, I) \ CK_CC_INLINE static T \ ck_pr_faa_##S(M *target, T d) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \ : "+m" (*(C *)target), \ "+q" (d) \ : \ : "memory", "cc"); \ return (d); \ } CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq") #define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I) CK_PR_FAA_S(char, char, "xaddb") CK_PR_FAA_S(uint, unsigned int, "xaddl") CK_PR_FAA_S(int, int, "xaddl") CK_PR_FAA_S(64, uint64_t, "xaddq") CK_PR_FAA_S(32, uint32_t, "xaddl") CK_PR_FAA_S(16, uint16_t, "xaddw") CK_PR_FAA_S(8, uint8_t, "xaddb") #undef CK_PR_FAA_S #undef CK_PR_FAA /* * Atomic store-only unary operations. 
*/ #define CK_PR_UNARY(K, S, T, C, I) \ CK_PR_UNARY_R(K, S, T, C, I) \ CK_PR_UNARY_V(K, S, T, C, I) #define CK_PR_UNARY_R(K, S, T, C, I) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(T *target) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \ : "+m" (*(C *)target) \ : \ : "memory", "cc"); \ return; \ } #define CK_PR_UNARY_V(K, S, T, C, I) \ CK_CC_INLINE static void \ ck_pr_##K##_##S##_zero(T *target, bool *r) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \ : "+m" (*(C *)target), \ "=m" (*r) \ : \ : "memory", "cc"); \ return; \ } #define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I) #define CK_PR_GENERATE(K) \ CK_PR_UNARY(K, ptr, void, char, #K "q") \ CK_PR_UNARY_S(K, char, char, #K "b") \ CK_PR_UNARY_S(K, int, int, #K "l") \ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \ CK_PR_UNARY_S(K, 64, uint64_t, #K "q") \ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \ CK_PR_UNARY_S(K, 8, uint8_t, #K "b") CK_PR_GENERATE(inc) CK_PR_GENERATE(dec) CK_PR_GENERATE(neg) /* not does not affect condition flags. */ #undef CK_PR_UNARY_V #define CK_PR_UNARY_V(a, b, c, d, e) CK_PR_GENERATE(not) #undef CK_PR_GENERATE #undef CK_PR_UNARY_S #undef CK_PR_UNARY_V #undef CK_PR_UNARY_R #undef CK_PR_UNARY /* * Atomic store-only binary operations. */ #define CK_PR_BINARY(K, S, M, T, C, I, O) \ CK_CC_INLINE static void \ ck_pr_##K##_##S(M *target, T d) \ { \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \ : "+m" (*(C *)target) \ : O "q" (d) \ : "memory", "cc"); \ return; \ } #define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O) #define CK_PR_GENERATE(K) \ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32) \ CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32) \ CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32) \ CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32) \ CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32) \ CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32) \ CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32) \ CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32) CK_PR_GENERATE(add) CK_PR_GENERATE(sub) CK_PR_GENERATE(and) CK_PR_GENERATE(or) CK_PR_GENERATE(xor) #undef CK_PR_GENERATE #undef CK_PR_BINARY_S #undef CK_PR_BINARY /* * Atomic compare and swap. */ #define CK_PR_CAS(S, M, T, C, I) \ CK_CC_INLINE static bool \ ck_pr_cas_##S(M *target, T compare, T set) \ { \ bool z; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \ : "+m" (*(C *)target), \ "=a" (z) \ : "q" (set), \ "a" (compare) \ : "memory", "cc"); \ return z; \ } CK_PR_CAS(ptr, void, void *, char, "cmpxchgq") #define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I) CK_PR_CAS_S(char, char, "cmpxchgb") CK_PR_CAS_S(int, int, "cmpxchgl") CK_PR_CAS_S(uint, unsigned int, "cmpxchgl") #ifndef CK_PR_DISABLE_DOUBLE CK_PR_CAS_S(double, double, "cmpxchgq") #endif CK_PR_CAS_S(64, uint64_t, "cmpxchgq") CK_PR_CAS_S(32, uint32_t, "cmpxchgl") CK_PR_CAS_S(16, uint16_t, "cmpxchgw") CK_PR_CAS_S(8, uint8_t, "cmpxchgb") #undef CK_PR_CAS_S #undef CK_PR_CAS /* * Compare and swap, set *v to old value of target. 
*/ #define CK_PR_CAS_O(S, M, T, C, I, R) \ CK_CC_INLINE static bool \ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \ { \ bool z; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \ "mov %% " R ", %2;" \ "setz %1;" \ : "+m" (*(C *)target), \ "=a" (z), \ "=m" (*(C *)v) \ : "q" (set), \ "a" (compare) \ : "memory", "cc"); \ return z; \ } CK_PR_CAS_O(ptr, void, void *, char, "q", "rax") #define CK_PR_CAS_O_S(S, T, I, R) \ CK_PR_CAS_O(S, T, T, T, I, R) CK_PR_CAS_O_S(char, char, "b", "al") CK_PR_CAS_O_S(int, int, "l", "eax") CK_PR_CAS_O_S(uint, unsigned int, "l", "eax") #ifndef CK_PR_DISABLE_DOUBLE CK_PR_CAS_O_S(double, double, "q", "rax") #endif CK_PR_CAS_O_S(64, uint64_t, "q", "rax") CK_PR_CAS_O_S(32, uint32_t, "l", "eax") CK_PR_CAS_O_S(16, uint16_t, "w", "ax") CK_PR_CAS_O_S(8, uint8_t, "b", "al") #undef CK_PR_CAS_O_S #undef CK_PR_CAS_O /* * Contrary to C-interface, alignment requirements are that of uint64_t[2]. */ CK_CC_INLINE static bool ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2]) { bool z; __asm__ __volatile__("movq 0(%4), %%rax;" "movq 8(%4), %%rdx;" CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1" : "+m" (*target), "=q" (z) : "b" (set[0]), "c" (set[1]), "q" (compare) : "memory", "cc", "%rax", "%rdx"); return z; } CK_CC_INLINE static bool ck_pr_cas_ptr_2(void *t, void *c, void *s) { return ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, t), CK_CPP_CAST(uint64_t *, c), CK_CPP_CAST(uint64_t *, s)); } CK_CC_INLINE static bool ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t v[2]) { bool z; __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;" "setz %3" : "+m" (*target), "=a" (v[0]), "=d" (v[1]), "=q" (z) : "a" (compare[0]), "d" (compare[1]), "b" (set[0]), "c" (set[1]) : "memory", "cc"); return z; } CK_CC_INLINE static bool ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v) { return ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *,t), CK_CPP_CAST(uint64_t *,c), CK_CPP_CAST(uint64_t *,s), CK_CPP_CAST(uint64_t *,v)); } #define CK_PR_CAS_V(S, W, T) \ CK_CC_INLINE static bool \ ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W]) \ { \ return ck_pr_cas_64_2((uint64_t *)(void *)t, \ (uint64_t *)(void *)c, \ (uint64_t *)(void *)s); \ } \ CK_CC_INLINE static bool \ ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v) \ { \ return ck_pr_cas_64_2_value((uint64_t *)(void *)t, \ (uint64_t *)(void *)c, \ (uint64_t *)(void *)s, \ (uint64_t *)(void *)v); \ } #ifndef CK_PR_DISABLE_DOUBLE CK_PR_CAS_V(double, 2, double) #endif CK_PR_CAS_V(char, 16, char) CK_PR_CAS_V(int, 4, int) CK_PR_CAS_V(uint, 4, unsigned int) CK_PR_CAS_V(32, 4, uint32_t) CK_PR_CAS_V(16, 8, uint16_t) CK_PR_CAS_V(8, 16, uint8_t) #undef CK_PR_CAS_V /* * Atomic bit test operations. 
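Before the bit-test wrappers, one note on the cmpxchg16b-based operations above: they update a 16-byte quantity atomically, the classic use being a pointer paired with a generation counter to defeat ABA. A hedged sketch; the tagged_ptr type and helper are illustrative, and the pair is aligned to 16 bytes, which cmpxchg16b requires.

#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stdint.h>

struct tagged_ptr {
	uint64_t ptr;	/* the pointer value */
	uint64_t gen;	/* bumped on every update to defeat ABA */
} CK_CC_ALIGN(16);

static bool
tagged_store(struct tagged_ptr *target, struct tagged_ptr *expected, void *np)
{
	struct tagged_ptr desired;

	desired.ptr = (uint64_t)(uintptr_t)np;
	desired.gen = expected->gen + 1;

	/* On failure, *expected is refreshed with the pair actually seen. */
	return ck_pr_cas_ptr_2_value(target, expected, &desired, expected);
}

A 64-bit generation counter makes wraparound of the tag a practical non-issue for the lifetime of a process.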
*/ #define CK_PR_BT(K, S, T, P, C, I) \ CK_CC_INLINE static bool \ ck_pr_##K##_##S(T *target, unsigned int b) \ { \ bool c; \ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \ : "+m" (*(C *)target), \ "=q" (c) \ : "q" ((P)b) \ : "memory", "cc"); \ return c; \ } #define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I) #define CK_PR_GENERATE(K) \ CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0") \ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \ CK_PR_BT_S(K, int, int, #K "l %2, %0") \ CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0") \ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0") CK_PR_GENERATE(btc) CK_PR_GENERATE(bts) CK_PR_GENERATE(btr) #undef CK_PR_GENERATE #undef CK_PR_BT #endif /* CK_PR_X86_64_H */ Index: head/sys/contrib/ck/include/spinlock/dec.h =================================================================== --- head/sys/contrib/ck/include/spinlock/dec.h (revision 331897) +++ head/sys/contrib/ck/include/spinlock/dec.h (revision 331898) @@ -1,143 +1,144 @@ /* * Copyright 2010-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_SPINLOCK_DEC_H #define CK_SPINLOCK_DEC_H #include #include #include #include #include #ifndef CK_F_SPINLOCK_DEC #define CK_F_SPINLOCK_DEC /* * This is similar to the CACAS lock but makes use of an atomic decrement * operation to check if the lock value was decremented to 0 from 1. The * idea is that a decrement operation is cheaper than a compare-and-swap. */ struct ck_spinlock_dec { unsigned int value; }; typedef struct ck_spinlock_dec ck_spinlock_dec_t; #define CK_SPINLOCK_DEC_INITIALIZER {1} CK_CC_INLINE static void ck_spinlock_dec_init(struct ck_spinlock_dec *lock) { lock->value = 1; ck_pr_barrier(); return; } CK_CC_INLINE static bool ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock) { unsigned int value; value = ck_pr_fas_uint(&lock->value, 0); ck_pr_fence_lock(); return value == 1; } CK_CC_INLINE static bool ck_spinlock_dec_locked(struct ck_spinlock_dec *lock) { bool r; r = ck_pr_load_uint(&lock->value) != 1; ck_pr_fence_acquire(); return r; } CK_CC_INLINE static void ck_spinlock_dec_lock(struct ck_spinlock_dec *lock) { bool r; for (;;) { /* * Only one thread is guaranteed to decrement lock to 0. * Overflow must be protected against. 
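For illustration only, a minimal sketch using the bit-test primitives generated earlier in this hunk (ck_pr_bts_uint / ck_pr_btr_uint) to claim and release slots in a 32-entry bitmap; the bitmap itself is a hypothetical example.

#include <ck_pr.h>
#include <stdbool.h>

static unsigned int slot_bitmap;	/* bit i set => slot i in use */

static int
slot_claim(void)
{
	unsigned int i;

	for (i = 0; i < 32; i++) {
		/* bts returns the previous bit value via setc. */
		if (ck_pr_bts_uint(&slot_bitmap, i) == false)
			return (int)i;	/* we transitioned it 0 -> 1 */
	}

	return -1;			/* no free slot */
}

static void
slot_release(unsigned int i)
{

	(void)ck_pr_btr_uint(&slot_bitmap, i);
	return;
}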
No more than * UINT_MAX lock requests can happen while the lock is held. */ ck_pr_dec_uint_zero(&lock->value, &r); if (r == true) break; /* Load value without generating write cycles. */ while (ck_pr_load_uint(&lock->value) != 1) ck_pr_stall(); } ck_pr_fence_lock(); return; } CK_CC_INLINE static void ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock) { ck_backoff_t backoff = CK_BACKOFF_INITIALIZER; bool r; for (;;) { ck_pr_dec_uint_zero(&lock->value, &r); if (r == true) break; - ck_backoff_eb(&backoff); + while (ck_pr_load_uint(&lock->value) != 1) + ck_backoff_eb(&backoff); } ck_pr_fence_lock(); return; } CK_CC_INLINE static void ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock) { ck_pr_fence_unlock(); /* * Unconditionally set lock value to 1 so someone can decrement lock * to 0. */ ck_pr_store_uint(&lock->value, 1); return; } CK_ELIDE_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t, ck_spinlock_dec_locked, ck_spinlock_dec_lock, ck_spinlock_dec_locked, ck_spinlock_dec_unlock) CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t, ck_spinlock_dec_locked, ck_spinlock_dec_trylock) #endif /* CK_F_SPINLOCK_DEC */ #endif /* CK_SPINLOCK_DEC_H */ Index: head/sys/contrib/ck/src/ck_hs.c =================================================================== --- head/sys/contrib/ck/src/ck_hs.c (revision 331897) +++ head/sys/contrib/ck/src/ck_hs.c (revision 331898) @@ -1,941 +1,958 @@ /* * Copyright 2012-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
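The hunk above changes the exponential-backoff path of this lock: a waiter now backs off while read-only polling shows the value still below 1, and only then retries the decrementing write, instead of re-issuing the write (and its cache-line invalidation) after every backoff period while the lock is still held. For illustration only, a minimal sketch of typical use, assuming ck_spinlock.h pulls in this header:

#include <ck_spinlock.h>

static ck_spinlock_dec_t counter_lock = CK_SPINLOCK_DEC_INITIALIZER;
static unsigned long counter;

static void
counter_bump(void)
{

	/* Backs off while the lock is observed held, then retries the decrement. */
	ck_spinlock_dec_lock_eb(&counter_lock);
	counter++;
	ck_spinlock_dec_unlock(&counter_lock);
	return;
}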
*/ #include #include #include #include #include #include #include #include #include "ck_internal.h" #ifndef CK_HS_PROBE_L1_SHIFT #define CK_HS_PROBE_L1_SHIFT 3ULL #endif /* CK_HS_PROBE_L1_SHIFT */ #define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT) #define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1) #ifndef CK_HS_PROBE_L1_DEFAULT #define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE #endif #define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1)) #define CK_HS_VMA(x) \ ((void *)((uintptr_t)(x) & CK_HS_VMA_MASK)) #define CK_HS_EMPTY NULL #define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0) #define CK_HS_G (2) #define CK_HS_G_MASK (CK_HS_G - 1) #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8) #define CK_HS_WORD uint8_t #define CK_HS_WORD_MAX UINT8_MAX #define CK_HS_STORE(x, y) ck_pr_store_8(x, y) #define CK_HS_LOAD(x) ck_pr_load_8(x) #elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16) #define CK_HS_WORD uint16_t #define CK_HS_WORD_MAX UINT16_MAX #define CK_HS_STORE(x, y) ck_pr_store_16(x, y) #define CK_HS_LOAD(x) ck_pr_load_16(x) #elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32) #define CK_HS_WORD uint32_t #define CK_HS_WORD_MAX UINT32_MAX #define CK_HS_STORE(x, y) ck_pr_store_32(x, y) #define CK_HS_LOAD(x) ck_pr_load_32(x) #else #error "ck_hs is not supported on your platform." #endif enum ck_hs_probe_behavior { CK_HS_PROBE = 0, /* Default behavior. */ CK_HS_PROBE_TOMBSTONE, /* Short-circuit on tombstone. */ CK_HS_PROBE_INSERT /* Short-circuit on probe bound if tombstone found. */ }; struct ck_hs_map { unsigned int generation[CK_HS_G]; unsigned int probe_maximum; unsigned long mask; unsigned long step; unsigned int probe_limit; unsigned int tombstones; unsigned long n_entries; unsigned long capacity; unsigned long size; CK_HS_WORD *probe_bound; const void **entries; }; static inline void ck_hs_map_signal(struct ck_hs_map *map, unsigned long h) { h &= CK_HS_G_MASK; ck_pr_store_uint(&map->generation[h], map->generation[h] + 1); ck_pr_fence_store(); return; } -void -ck_hs_iterator_init(struct ck_hs_iterator *iterator) +static bool +_ck_hs_next(struct ck_hs *hs, struct ck_hs_map *map, struct ck_hs_iterator *i, void **key) { - - iterator->cursor = NULL; - iterator->offset = 0; - return; -} - -bool -ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key) -{ - struct ck_hs_map *map = hs->map; void *value; - if (i->offset >= map->capacity) return false; do { value = CK_CC_DECONST_PTR(map->entries[i->offset]); if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) { #ifdef CK_HS_PP if (hs->mode & CK_HS_MODE_OBJECT) value = CK_HS_VMA(value); +#else + (void)hs; /* Avoid unused parameter warning. 
*/ #endif i->offset++; *key = value; return true; } } while (++i->offset < map->capacity); return false; } void +ck_hs_iterator_init(struct ck_hs_iterator *iterator) +{ + + iterator->cursor = NULL; + iterator->offset = 0; + iterator->map = NULL; + return; +} + +bool +ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key) +{ + return _ck_hs_next(hs, hs->map, i, key); +} + +bool +ck_hs_next_spmc(struct ck_hs *hs, struct ck_hs_iterator *i, void **key) +{ + struct ck_hs_map *m = i->map; + if (m == NULL) { + m = i->map = ck_pr_load_ptr(&hs->map); + } + return _ck_hs_next(hs, m, i, key); +} + +void ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st) { struct ck_hs_map *map = hs->map; st->n_entries = map->n_entries; st->tombstones = map->tombstones; st->probe_maximum = map->probe_maximum; return; } unsigned long ck_hs_count(struct ck_hs *hs) { return hs->map->n_entries; } static void ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer) { m->free(map, map->size, defer); return; } void ck_hs_destroy(struct ck_hs *hs) { ck_hs_map_destroy(hs->m, hs->map, false); return; } static struct ck_hs_map * ck_hs_map_create(struct ck_hs *hs, unsigned long entries) { struct ck_hs_map *map; unsigned long size, n_entries, prefix, limit; n_entries = ck_internal_power_2(entries); if (n_entries < CK_HS_PROBE_L1) n_entries = CK_HS_PROBE_L1; size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1); if (hs->mode & CK_HS_MODE_DELETE) { prefix = sizeof(CK_HS_WORD) * n_entries; size += prefix; } else { prefix = 0; } map = hs->m->malloc(size); if (map == NULL) return NULL; map->size = size; /* We should probably use a more intelligent heuristic for default probe length. */ limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT); if (limit > UINT_MAX) limit = UINT_MAX; map->probe_limit = (unsigned int)limit; map->probe_maximum = 0; map->capacity = n_entries; - map->step = ck_internal_bsf(n_entries); + map->step = ck_cc_ffsl(n_entries); map->mask = n_entries - 1; map->n_entries = 0; /* Align map allocation to cache line. */ map->entries = (void *)(((uintptr_t)&map[1] + prefix + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1)); memset(map->entries, 0, sizeof(void *) * n_entries); memset(map->generation, 0, sizeof map->generation); if (hs->mode & CK_HS_MODE_DELETE) { map->probe_bound = (CK_HS_WORD *)&map[1]; memset(map->probe_bound, 0, prefix); } else { map->probe_bound = NULL; } /* Commit entries purge with respect to map publication. 
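ck_hs_next_spmc() is new in this revision: it latches hs->map into the iterator on first use, so a single traversal keeps walking one consistent map even if a concurrent writer grows or resets the set (readers of the old map stay safe because its destruction is deferred). For illustration only, a minimal usage sketch; consume() is a hypothetical application hook.

#include <ck_hs.h>

static void consume(void *object);	/* hypothetical */

static void
dump_set(struct ck_hs *hs)
{
	ck_hs_iterator_t it;
	void *entry;

	ck_hs_iterator_init(&it);
	while (ck_hs_next_spmc(hs, &it, &entry) == true)
		consume(entry);

	return;
}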
*/ ck_pr_fence_store(); return map; } bool ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity) { struct ck_hs_map *map, *previous; previous = hs->map; map = ck_hs_map_create(hs, capacity); if (map == NULL) return false; ck_pr_store_ptr(&hs->map, map); ck_hs_map_destroy(hs->m, previous, true); return true; } bool ck_hs_reset(struct ck_hs *hs) { struct ck_hs_map *previous; previous = hs->map; return ck_hs_reset_size(hs, previous->capacity); } static inline unsigned long ck_hs_map_probe_next(struct ck_hs_map *map, unsigned long offset, unsigned long h, unsigned long level, unsigned long probes) { unsigned long r, stride; r = (h >> map->step) >> level; stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK); return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) + (stride | CK_HS_PROBE_L1)) & map->mask; } static inline void ck_hs_map_bound_set(struct ck_hs_map *m, unsigned long h, unsigned long n_probes) { unsigned long offset = h & m->mask; if (n_probes > m->probe_maximum) ck_pr_store_uint(&m->probe_maximum, n_probes); if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) { if (n_probes > CK_HS_WORD_MAX) n_probes = CK_HS_WORD_MAX; CK_HS_STORE(&m->probe_bound[offset], n_probes); ck_pr_fence_store(); } return; } static inline unsigned int ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h) { unsigned long offset = h & m->mask; unsigned int r = CK_HS_WORD_MAX; if (m->probe_bound != NULL) { r = CK_HS_LOAD(&m->probe_bound[offset]); if (r == CK_HS_WORD_MAX) r = ck_pr_load_uint(&m->probe_maximum); } else { r = ck_pr_load_uint(&m->probe_maximum); } return r; } bool ck_hs_grow(struct ck_hs *hs, unsigned long capacity) { struct ck_hs_map *map, *update; unsigned long k, i, j, offset, probes; const void *previous, **bucket; restart: map = hs->map; if (map->capacity > capacity) return false; update = ck_hs_map_create(hs, capacity); if (update == NULL) return false; for (k = 0; k < map->capacity; k++) { unsigned long h; previous = map->entries[k]; if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE) continue; #ifdef CK_HS_PP if (hs->mode & CK_HS_MODE_OBJECT) previous = CK_HS_VMA(previous); #endif h = hs->hf(previous, hs->seed); offset = h & update->mask; i = probes = 0; for (;;) { bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1)); for (j = 0; j < CK_HS_PROBE_L1; j++) { const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1)); if (probes++ == update->probe_limit) break; if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) { *cursor = map->entries[k]; update->n_entries++; ck_hs_map_bound_set(update, h, probes); break; } } if (j < CK_HS_PROBE_L1) break; offset = ck_hs_map_probe_next(update, offset, h, i++, probes); } if (probes > update->probe_limit) { /* * We have hit the probe limit, map needs to be even larger. 
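ck_hs_map_probe_next() above derives its stride from map->step, which ck_hs_map_create() now fills with ck_cc_ffsl() instead of the removed ck_internal_bsf(). For the power-of-two capacities produced by ck_internal_power_2(), both report the 1-based index of the single set bit, so the stored step value is unchanged. A quick sketch of that equivalence, assuming ck_cc.h:

#include <assert.h>
#include <ck_cc.h>

static void
step_equivalence(void)
{

	/* capacity is always a power of two here. */
	assert(ck_cc_ffsl(1024UL) == 11);	/* log2(1024) + 1 */
	assert(ck_cc_ffsl(8UL) == 4);
	assert(ck_cc_ffsl(0UL) == 0);		/* the generic fallback returns 0 for 0 */
	return;
}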
*/ ck_hs_map_destroy(hs->m, update, false); capacity <<= 1; goto restart; } } ck_pr_fence_store(); ck_pr_store_ptr(&hs->map, update); ck_hs_map_destroy(hs->m, map, true); return true; } static void ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map) { map->n_entries++; if ((map->n_entries << 1) > map->capacity) ck_hs_grow(hs, map->capacity << 1); return; } bool ck_hs_rebuild(struct ck_hs *hs) { return ck_hs_grow(hs, hs->map->capacity); } static const void ** ck_hs_map_probe(struct ck_hs *hs, struct ck_hs_map *map, unsigned long *n_probes, const void ***priority, unsigned long h, const void *key, const void **object, unsigned long probe_limit, enum ck_hs_probe_behavior behavior) { const void **bucket, **cursor, *k, *compare; const void **pr = NULL; unsigned long offset, j, i, probes, opl; #ifdef CK_HS_PP /* If we are storing object pointers, then we may leverage pointer packing. */ unsigned long hv = 0; if (hs->mode & CK_HS_MODE_OBJECT) { hv = (h >> 25) & CK_HS_KEY_MASK; compare = CK_HS_VMA(key); } else { compare = key; } #else compare = key; #endif offset = h & map->mask; *object = NULL; i = probes = 0; opl = probe_limit; if (behavior == CK_HS_PROBE_INSERT) probe_limit = ck_hs_map_bound_get(map, h); for (;;) { bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1)); for (j = 0; j < CK_HS_PROBE_L1; j++) { cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1)); if (probes++ == probe_limit) { if (probe_limit == opl || pr != NULL) { k = CK_HS_EMPTY; goto leave; } /* * If no eligible slot has been found yet, continue probe * sequence with original probe limit. */ probe_limit = opl; } k = ck_pr_load_ptr(cursor); if (k == CK_HS_EMPTY) goto leave; if (k == CK_HS_TOMBSTONE) { if (pr == NULL) { pr = cursor; *n_probes = probes; if (behavior == CK_HS_PROBE_TOMBSTONE) { k = CK_HS_EMPTY; goto leave; } } continue; } #ifdef CK_HS_PP if (hs->mode & CK_HS_MODE_OBJECT) { if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) continue; k = CK_HS_VMA(k); } #endif if (k == compare) goto leave; if (hs->compare == NULL) continue; if (hs->compare(k, key) == true) goto leave; } offset = ck_hs_map_probe_next(map, offset, h, i++, probes); } leave: if (probes > probe_limit) { cursor = NULL; } else { *object = k; } if (pr == NULL) *n_probes = probes; *priority = pr; return cursor; } static inline const void * ck_hs_marshal(unsigned int mode, const void *key, unsigned long h) { #ifdef CK_HS_PP const void *insert; if (mode & CK_HS_MODE_OBJECT) { insert = (void *)((uintptr_t)CK_HS_VMA(key) | ((h >> 25) << CK_MD_VMA_BITS)); } else { insert = key; } return insert; #else (void)mode; (void)h; return key; #endif } bool ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed) { unsigned long size = 0; unsigned long i; struct ck_hs_map *map = hs->map; unsigned int maximum; CK_HS_WORD *bounds = NULL; if (map->n_entries == 0) { ck_pr_store_uint(&map->probe_maximum, 0); if (map->probe_bound != NULL) memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity); return true; } if (cycles == 0) { maximum = 0; if (map->probe_bound != NULL) { size = sizeof(CK_HS_WORD) * map->capacity; bounds = hs->m->malloc(size); if (bounds == NULL) return false; memset(bounds, 0, size); } } else { maximum = map->probe_maximum; } for (i = 0; i < map->capacity; i++) { const void **first, *object, **slot, *entry; unsigned long n_probes, offset, h; entry = map->entries[(i + seed) & map->mask]; if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE) continue; #ifdef CK_HS_PP if (hs->mode & CK_HS_MODE_OBJECT) entry = 
CK_HS_VMA(entry); #endif h = hs->hf(entry, hs->seed); offset = h & map->mask; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object, ck_hs_map_bound_get(map, h), CK_HS_PROBE); if (first != NULL) { const void *insert = ck_hs_marshal(hs->mode, entry, h); ck_pr_store_ptr(first, insert); ck_hs_map_signal(map, h); ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); } if (cycles == 0) { if (n_probes > maximum) maximum = n_probes; if (n_probes > CK_HS_WORD_MAX) n_probes = CK_HS_WORD_MAX; if (bounds != NULL && n_probes > bounds[offset]) bounds[offset] = n_probes; } else if (--cycles == 0) break; } /* * The following only apply to garbage collection involving * a full scan of all entries. */ if (maximum != map->probe_maximum) ck_pr_store_uint(&map->probe_maximum, maximum); if (bounds != NULL) { for (i = 0; i < map->capacity; i++) CK_HS_STORE(&map->probe_bound[i], bounds[i]); hs->m->free(bounds, size, false); } return true; } bool ck_hs_fas(struct ck_hs *hs, unsigned long h, const void *key, void **previous) { const void **slot, **first, *object, *insert; struct ck_hs_map *map = hs->map; unsigned long n_probes; *previous = NULL; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, ck_hs_map_bound_get(map, h), CK_HS_PROBE); /* Replacement semantics presume existence. */ if (object == NULL) return false; insert = ck_hs_marshal(hs->mode, key, h); if (first != NULL) { ck_pr_store_ptr(first, insert); ck_hs_map_signal(map, h); ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); } else { ck_pr_store_ptr(slot, insert); } *previous = CK_CC_DECONST_PTR(object); return true; } /* * An apply function takes two arguments. The first argument is a pointer to a * pre-existing object. The second argument is a pointer to the fifth argument * passed to ck_hs_apply. If a non-NULL pointer is passed to the first argument * and the return value of the apply function is NULL, then the pre-existing * value is deleted. If the return pointer is the same as the one passed to the * apply function then no changes are made to the hash table. If the first * argument is non-NULL and the return pointer is different than that passed to * the apply function, then the pre-existing value is replaced. For * replacement, it is required that the value itself is identical to the * previous value. */ bool ck_hs_apply(struct ck_hs *hs, unsigned long h, const void *key, ck_hs_apply_fn_t *fn, void *cl) { const void **slot, **first, *object, *delta, *insert; unsigned long n_probes; struct ck_hs_map *map; restart: map = hs->map; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT); if (slot == NULL && first == NULL) { if (ck_hs_grow(hs, map->capacity << 1) == false) return false; goto restart; } delta = fn(CK_CC_DECONST_PTR(object), cl); if (delta == NULL) { /* * The apply function has requested deletion. If the object doesn't exist, * then exit early. */ if (CK_CC_UNLIKELY(object == NULL)) return true; /* Otherwise, mark slot as deleted. */ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); map->n_entries--; map->tombstones++; return true; } /* The apply function has not requested hash set modification so exit early. */ if (delta == object) return true; /* A modification or insertion has been requested. */ ck_hs_map_bound_set(map, h, n_probes); insert = ck_hs_marshal(hs->mode, delta, h); if (first != NULL) { /* * This follows the same semantics as ck_hs_set, please refer to that * function for documentation. 
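Following the apply-function contract documented in the comment preceding ck_hs_apply() (return NULL to delete, the existing pointer to leave the set untouched, a different pointer to insert or replace), a minimal illustrative callback that inserts an object only when the key is absent. The hash value h is assumed to come from the hash function registered at ck_hs_init() time, and the callback prototype follows ck_hs_apply_fn_t from ck_hs.h.

#include <ck_hs.h>
#include <stdbool.h>

static void *
insert_if_absent(void *existing, void *closure)
{

	if (existing != NULL)
		return existing;	/* present: leave the slot untouched */

	return closure;			/* absent: install the caller's object */
}

static bool
try_insert(struct ck_hs *hs, unsigned long h, void *object)
{

	return ck_hs_apply(hs, h, object, insert_if_absent, object);
}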
*/ ck_pr_store_ptr(first, insert); if (object != NULL) { ck_hs_map_signal(map, h); ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); } } else { /* * If we are storing into same slot, then atomic store is sufficient * for replacement. */ ck_pr_store_ptr(slot, insert); } if (object == NULL) ck_hs_map_postinsert(hs, map); return true; } bool ck_hs_set(struct ck_hs *hs, unsigned long h, const void *key, void **previous) { const void **slot, **first, *object, *insert; unsigned long n_probes; struct ck_hs_map *map; *previous = NULL; restart: map = hs->map; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT); if (slot == NULL && first == NULL) { if (ck_hs_grow(hs, map->capacity << 1) == false) return false; goto restart; } ck_hs_map_bound_set(map, h, n_probes); insert = ck_hs_marshal(hs->mode, key, h); if (first != NULL) { /* If an earlier bucket was found, then store entry there. */ ck_pr_store_ptr(first, insert); /* * If a duplicate key was found, then delete it after * signaling concurrent probes to restart. Optionally, * it is possible to install tombstone after grace * period if we can guarantee earlier position of * duplicate key. */ if (object != NULL) { ck_hs_map_signal(map, h); ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); } } else { /* * If we are storing into same slot, then atomic store is sufficient * for replacement. */ ck_pr_store_ptr(slot, insert); } if (object == NULL) ck_hs_map_postinsert(hs, map); *previous = CK_CC_DECONST_PTR(object); return true; } CK_CC_INLINE static bool ck_hs_put_internal(struct ck_hs *hs, unsigned long h, const void *key, enum ck_hs_probe_behavior behavior) { const void **slot, **first, *object, *insert; unsigned long n_probes; struct ck_hs_map *map; restart: map = hs->map; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, behavior); if (slot == NULL && first == NULL) { if (ck_hs_grow(hs, map->capacity << 1) == false) return false; goto restart; } /* Fail operation if a match was found. */ if (object != NULL) return false; ck_hs_map_bound_set(map, h, n_probes); insert = ck_hs_marshal(hs->mode, key, h); if (first != NULL) { /* Insert key into first bucket in probe sequence. */ ck_pr_store_ptr(first, insert); } else { /* An empty slot was found. 
*/ ck_pr_store_ptr(slot, insert); } ck_hs_map_postinsert(hs, map); return true; } bool ck_hs_put(struct ck_hs *hs, unsigned long h, const void *key) { return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT); } bool ck_hs_put_unique(struct ck_hs *hs, unsigned long h, const void *key) { return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE); } void * ck_hs_get(struct ck_hs *hs, unsigned long h, const void *key) { const void **first, *object; struct ck_hs_map *map; unsigned long n_probes; unsigned int g, g_p, probe; unsigned int *generation; do { map = ck_pr_load_ptr(&hs->map); generation = &map->generation[h & CK_HS_G_MASK]; g = ck_pr_load_uint(generation); probe = ck_hs_map_bound_get(map, h); ck_pr_fence_load(); ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE); ck_pr_fence_load(); g_p = ck_pr_load_uint(generation); } while (g != g_p); return CK_CC_DECONST_PTR(object); } void * ck_hs_remove(struct ck_hs *hs, unsigned long h, const void *key) { const void **slot, **first, *object; struct ck_hs_map *map = hs->map; unsigned long n_probes; slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, ck_hs_map_bound_get(map, h), CK_HS_PROBE); if (object == NULL) return NULL; ck_pr_store_ptr(slot, CK_HS_TOMBSTONE); map->n_entries--; map->tombstones++; return CK_CC_DECONST_PTR(object); } bool ck_hs_move(struct ck_hs *hs, struct ck_hs *source, ck_hs_hash_cb_t *hf, ck_hs_compare_cb_t *compare, struct ck_malloc *m) { if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL) return false; hs->mode = source->mode; hs->seed = source->seed; hs->map = source->map; hs->m = m; hs->hf = hf; hs->compare = compare; return true; } bool ck_hs_init(struct ck_hs *hs, unsigned int mode, ck_hs_hash_cb_t *hf, ck_hs_compare_cb_t *compare, struct ck_malloc *m, unsigned long n_entries, unsigned long seed) { if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL) return false; hs->m = m; hs->mode = mode; hs->seed = seed; hs->hf = hf; hs->compare = compare; hs->map = ck_hs_map_create(hs, n_entries); return hs->map != NULL; } Index: head/sys/contrib/ck/src/ck_ht.c =================================================================== --- head/sys/contrib/ck/src/ck_ht.c (revision 331897) +++ head/sys/contrib/ck/src/ck_ht.c (revision 331898) @@ -1,1036 +1,1036 @@ /* * Copyright 2012-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
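Pulling the ck_hs interfaces from the preceding file together, an illustrative single-writer sketch of init/put/get/remove. hs_malloc, hs_free, object_hash and object_compare are hypothetical application callbacks, and CK_HS_HASH() is the convenience macro from ck_hs.h that applies the registered hash function with the set's seed.

#include <ck_hs.h>
#include <stdbool.h>
#include <stddef.h>

static void *hs_malloc(size_t size);				/* hypothetical */
static void hs_free(void *p, size_t size, bool defer);		/* hypothetical */
static unsigned long object_hash(const void *object, unsigned long seed);	/* hypothetical */
static bool object_compare(const void *a, const void *b);			/* hypothetical */

static struct ck_malloc hs_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

static bool
example(struct ck_hs *hs, void *object)
{
	unsigned long h;

	if (ck_hs_init(hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
	    object_hash, object_compare, &hs_allocator, 128, 6602834) == false)
		return false;

	h = CK_HS_HASH(hs, object_hash, object);
	if (ck_hs_put(hs, h, object) == false)
		return false;		/* duplicate key or allocation failure */

	return ck_hs_get(hs, h, object) == object &&
	    ck_hs_remove(hs, h, object) == object;
}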
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define CK_HT_IM #include /* * This implementation borrows several techniques from Josh Dybnis's * nbds library which can be found at http://code.google.com/p/nbds * * This release currently only includes support for 64-bit platforms. * We can address 32-bit platforms in a future release. */ #include #include #include #include #include #include #include "ck_ht_hash.h" #include "ck_internal.h" #ifndef CK_HT_BUCKET_LENGTH #ifdef CK_HT_PP #define CK_HT_BUCKET_SHIFT 2ULL #else #define CK_HT_BUCKET_SHIFT 1ULL #endif #define CK_HT_BUCKET_LENGTH (1U << CK_HT_BUCKET_SHIFT) #define CK_HT_BUCKET_MASK (CK_HT_BUCKET_LENGTH - 1) #endif #ifndef CK_HT_PROBE_DEFAULT #define CK_HT_PROBE_DEFAULT 64ULL #endif #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8) #define CK_HT_WORD uint8_t #define CK_HT_WORD_MAX UINT8_MAX #define CK_HT_STORE(x, y) ck_pr_store_8(x, y) #define CK_HT_LOAD(x) ck_pr_load_8(x) #elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16) #define CK_HT_WORD uint16_t #define CK_HT_WORD_MAX UINT16_MAX #define CK_HT_STORE(x, y) ck_pr_store_16(x, y) #define CK_HT_LOAD(x) ck_pr_load_16(x) #elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32) #define CK_HT_WORD uint32_t #define CK_HT_WORD_MAX UINT32_MAX #define CK_HT_STORE(x, y) ck_pr_store_32(x, y) #define CK_HT_LOAD(x) ck_pr_load_32(x) #else #error "ck_ht is not supported on your platform." 
#endif struct ck_ht_map { unsigned int mode; CK_HT_TYPE deletions; CK_HT_TYPE probe_maximum; CK_HT_TYPE probe_length; CK_HT_TYPE probe_limit; CK_HT_TYPE size; CK_HT_TYPE n_entries; CK_HT_TYPE mask; CK_HT_TYPE capacity; CK_HT_TYPE step; CK_HT_WORD *probe_bound; struct ck_ht_entry *entries; }; void ck_ht_stat(struct ck_ht *table, struct ck_ht_stat *st) { struct ck_ht_map *map = table->map; st->n_entries = map->n_entries; st->probe_maximum = map->probe_maximum; return; } void ck_ht_hash(struct ck_ht_hash *h, struct ck_ht *table, const void *key, uint16_t key_length) { table->h(h, key, key_length, table->seed); return; } void ck_ht_hash_direct(struct ck_ht_hash *h, struct ck_ht *table, uintptr_t key) { ck_ht_hash(h, table, &key, sizeof(key)); return; } static void ck_ht_hash_wrapper(struct ck_ht_hash *h, const void *key, size_t length, uint64_t seed) { h->value = (unsigned long)MurmurHash64A(key, length, seed); return; } static struct ck_ht_map * ck_ht_map_create(struct ck_ht *table, CK_HT_TYPE entries) { struct ck_ht_map *map; CK_HT_TYPE size; uintptr_t prefix; uint32_t n_entries; n_entries = ck_internal_power_2(entries); if (n_entries < CK_HT_BUCKET_LENGTH) n_entries = CK_HT_BUCKET_LENGTH; size = sizeof(struct ck_ht_map) + (sizeof(struct ck_ht_entry) * n_entries + CK_MD_CACHELINE - 1); if (table->mode & CK_HT_WORKLOAD_DELETE) { prefix = sizeof(CK_HT_WORD) * n_entries; size += prefix; } else { prefix = 0; } map = table->m->malloc(size); if (map == NULL) return NULL; map->mode = table->mode; map->size = size; map->probe_limit = ck_internal_max_64(n_entries >> (CK_HT_BUCKET_SHIFT + 2), CK_HT_PROBE_DEFAULT); map->deletions = 0; map->probe_maximum = 0; map->capacity = n_entries; - map->step = ck_internal_bsf_64(map->capacity); + map->step = ck_cc_ffsll(map->capacity); map->mask = map->capacity - 1; map->n_entries = 0; map->entries = (struct ck_ht_entry *)(((uintptr_t)&map[1] + prefix + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1)); if (table->mode & CK_HT_WORKLOAD_DELETE) { map->probe_bound = (CK_HT_WORD *)&map[1]; memset(map->probe_bound, 0, prefix); } else { map->probe_bound = NULL; } memset(map->entries, 0, sizeof(struct ck_ht_entry) * n_entries); ck_pr_fence_store(); return map; } static inline void ck_ht_map_bound_set(struct ck_ht_map *m, struct ck_ht_hash h, CK_HT_TYPE n_probes) { CK_HT_TYPE offset = h.value & m->mask; if (n_probes > m->probe_maximum) CK_HT_TYPE_STORE(&m->probe_maximum, n_probes); if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) { if (n_probes >= CK_HT_WORD_MAX) n_probes = CK_HT_WORD_MAX; CK_HT_STORE(&m->probe_bound[offset], n_probes); ck_pr_fence_store(); } return; } static inline CK_HT_TYPE ck_ht_map_bound_get(struct ck_ht_map *m, struct ck_ht_hash h) { CK_HT_TYPE offset = h.value & m->mask; CK_HT_TYPE r = CK_HT_WORD_MAX; if (m->probe_bound != NULL) { r = CK_HT_LOAD(&m->probe_bound[offset]); if (r == CK_HT_WORD_MAX) r = CK_HT_TYPE_LOAD(&m->probe_maximum); } else { r = CK_HT_TYPE_LOAD(&m->probe_maximum); } return r; } static void ck_ht_map_destroy(struct ck_malloc *m, struct ck_ht_map *map, bool defer) { m->free(map, map->size, defer); return; } static inline size_t ck_ht_map_probe_next(struct ck_ht_map *map, size_t offset, ck_ht_hash_t h, size_t probes) { ck_ht_hash_t r; size_t stride; unsigned long level = (unsigned long)probes >> CK_HT_BUCKET_SHIFT; r.value = (h.value >> map->step) >> level; stride = (r.value & ~CK_HT_BUCKET_MASK) << 1 | (r.value & CK_HT_BUCKET_MASK); return (offset + level + (stride | CK_HT_BUCKET_LENGTH)) & map->mask; } bool 
ck_ht_init(struct ck_ht *table, unsigned int mode, ck_ht_hash_cb_t *h, struct ck_malloc *m, CK_HT_TYPE entries, uint64_t seed) { if (m == NULL || m->malloc == NULL || m->free == NULL) return false; table->m = m; table->mode = mode; table->seed = seed; if (h == NULL) { table->h = ck_ht_hash_wrapper; } else { table->h = h; } table->map = ck_ht_map_create(table, entries); return table->map != NULL; } static struct ck_ht_entry * ck_ht_map_probe_wr(struct ck_ht_map *map, ck_ht_hash_t h, ck_ht_entry_t *snapshot, ck_ht_entry_t **available, const void *key, uint16_t key_length, CK_HT_TYPE *probe_limit, CK_HT_TYPE *probe_wr) { struct ck_ht_entry *bucket, *cursor; struct ck_ht_entry *first = NULL; size_t offset, i, j; CK_HT_TYPE probes = 0; CK_HT_TYPE limit; if (probe_limit == NULL) { limit = ck_ht_map_bound_get(map, h); } else { limit = CK_HT_TYPE_MAX; } offset = h.value & map->mask; for (i = 0; i < map->probe_limit; i++) { /* * Probe on a complete cache line first. Scan forward and wrap around to * the beginning of the cache line. Only when the complete cache line has * been scanned do we move on to the next row. */ bucket = (void *)((uintptr_t)(map->entries + offset) & ~(CK_MD_CACHELINE - 1)); for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) { uint16_t k; if (probes++ > limit) break; cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); /* * It is probably worth it to encapsulate probe state * in order to prevent a complete reprobe sequence in * the case of intermittent writers. */ if (cursor->key == CK_HT_KEY_TOMBSTONE) { if (first == NULL) { first = cursor; *probe_wr = probes; } continue; } if (cursor->key == CK_HT_KEY_EMPTY) goto leave; if (cursor->key == (uintptr_t)key) goto leave; if (map->mode & CK_HT_MODE_BYTESTRING) { void *pointer; /* * Check memoized portion of hash value before * expensive full-length comparison. 
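For illustration only, a minimal byte-string keyed sketch of the write side: passing NULL as the hash callback to ck_ht_init() above selects the bundled MurmurHash64A wrapper, and ck_ht_entry_set() from ck_ht.h packs key, length and value into the entry. ht_malloc and ht_free are hypothetical allocator callbacks.

#include <ck_ht.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *ht_malloc(size_t size);				/* hypothetical */
static void ht_free(void *p, size_t size, bool defer);		/* hypothetical */

static struct ck_malloc ht_allocator = {
	.malloc = ht_malloc,
	.free = ht_free
};

static bool
store_value(struct ck_ht *ht, const char *key, void *value)
{
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	uint16_t length = (uint16_t)strlen(key);

	if (ck_ht_init(ht, CK_HT_MODE_BYTESTRING, NULL, &ht_allocator,
	    64, 6602834) == false)
		return false;

	ck_ht_hash(&h, ht, key, length);
	ck_ht_entry_set(&entry, h, key, length, value);

	/* put fails on a duplicate key; ck_ht_set_spmc() would replace instead. */
	return ck_ht_put_spmc(ht, h, &entry);
}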
*/ k = ck_ht_entry_key_length(cursor); if (k != key_length) continue; #ifdef CK_HT_PP if ((cursor->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK)) continue; #else if (cursor->hash != h.value) continue; #endif pointer = ck_ht_entry_key(cursor); if (memcmp(pointer, key, key_length) == 0) goto leave; } } offset = ck_ht_map_probe_next(map, offset, h, probes); } cursor = NULL; leave: if (probe_limit != NULL) { *probe_limit = probes; } else if (first == NULL) { *probe_wr = probes; } *available = first; if (cursor != NULL) { *snapshot = *cursor; } return cursor; } bool ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed) { CK_HT_WORD *bounds = NULL; struct ck_ht_map *map = ht->map; CK_HT_TYPE maximum, i; CK_HT_TYPE size = 0; if (map->n_entries == 0) { CK_HT_TYPE_STORE(&map->probe_maximum, 0); if (map->probe_bound != NULL) memset(map->probe_bound, 0, sizeof(CK_HT_WORD) * map->capacity); return true; } if (cycles == 0) { maximum = 0; if (map->probe_bound != NULL) { size = sizeof(CK_HT_WORD) * map->capacity; bounds = ht->m->malloc(size); if (bounds == NULL) return false; memset(bounds, 0, size); } } else { maximum = map->probe_maximum; } for (i = 0; i < map->capacity; i++) { struct ck_ht_entry *entry, *priority, snapshot; struct ck_ht_hash h; CK_HT_TYPE probes_wr; CK_HT_TYPE offset; entry = &map->entries[(i + seed) & map->mask]; if (entry->key == CK_HT_KEY_EMPTY || entry->key == CK_HT_KEY_TOMBSTONE) { continue; } if (ht->mode & CK_HT_MODE_BYTESTRING) { #ifndef CK_HT_PP h.value = entry->hash; #else ht->h(&h, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), ht->seed); #endif entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), NULL, &probes_wr); } else { #ifndef CK_HT_PP h.value = entry->hash; #else ht->h(&h, &entry->key, sizeof(entry->key), ht->seed); #endif entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority, (void *)entry->key, sizeof(entry->key), NULL, &probes_wr); } offset = h.value & map->mask; if (priority != NULL) { CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); #ifndef CK_HT_PP CK_HT_TYPE_STORE(&priority->key_length, entry->key_length); CK_HT_TYPE_STORE(&priority->hash, entry->hash); #endif ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key); ck_pr_fence_store(); CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&entry->key, (void *)CK_HT_KEY_TOMBSTONE); ck_pr_fence_store(); } if (cycles == 0) { if (probes_wr > maximum) maximum = probes_wr; if (probes_wr >= CK_HT_WORD_MAX) probes_wr = CK_HT_WORD_MAX; if (bounds != NULL && probes_wr > bounds[offset]) bounds[offset] = probes_wr; } else if (--cycles == 0) break; } if (maximum != map->probe_maximum) CK_HT_TYPE_STORE(&map->probe_maximum, maximum); if (bounds != NULL) { for (i = 0; i < map->capacity; i++) CK_HT_STORE(&map->probe_bound[i], bounds[i]); ht->m->free(bounds, size, false); } return true; } static struct ck_ht_entry * ck_ht_map_probe_rd(struct ck_ht_map *map, ck_ht_hash_t h, ck_ht_entry_t *snapshot, const void *key, uint16_t key_length) { struct ck_ht_entry *bucket, *cursor; size_t offset, i, j; CK_HT_TYPE probes = 0; CK_HT_TYPE probe_maximum; #ifndef CK_HT_PP CK_HT_TYPE d = 0; CK_HT_TYPE d_prime = 0; retry: #endif probe_maximum = ck_ht_map_bound_get(map, h); offset = h.value & map->mask; for (i = 0; i < map->probe_limit; i++) { /* * Probe on a complete cache line first. 
Scan forward and wrap around to * the beginning of the cache line. Only when the complete cache line has * been scanned do we move on to the next row. */ bucket = (void *)((uintptr_t)(map->entries + offset) & ~(CK_MD_CACHELINE - 1)); for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) { uint16_t k; if (probes++ > probe_maximum) return NULL; cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); #ifdef CK_HT_PP snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key); ck_pr_fence_load(); snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value); #else d = CK_HT_TYPE_LOAD(&map->deletions); snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key); ck_pr_fence_load(); snapshot->key_length = CK_HT_TYPE_LOAD(&cursor->key_length); snapshot->hash = CK_HT_TYPE_LOAD(&cursor->hash); snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value); #endif /* * It is probably worth it to encapsulate probe state * in order to prevent a complete reprobe sequence in * the case of intermittent writers. */ if (snapshot->key == CK_HT_KEY_TOMBSTONE) continue; if (snapshot->key == CK_HT_KEY_EMPTY) goto leave; if (snapshot->key == (uintptr_t)key) goto leave; if (map->mode & CK_HT_MODE_BYTESTRING) { void *pointer; /* * Check memoized portion of hash value before * expensive full-length comparison. */ k = ck_ht_entry_key_length(snapshot); if (k != key_length) continue; #ifdef CK_HT_PP if ((snapshot->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK)) continue; #else if (snapshot->hash != h.value) continue; d_prime = CK_HT_TYPE_LOAD(&map->deletions); /* * It is possible that the slot was * replaced, initiate a re-probe. */ if (d != d_prime) goto retry; #endif pointer = ck_ht_entry_key(snapshot); if (memcmp(pointer, key, key_length) == 0) goto leave; } } offset = ck_ht_map_probe_next(map, offset, h, probes); } return NULL; leave: return cursor; } CK_HT_TYPE ck_ht_count(struct ck_ht *table) { struct ck_ht_map *map = ck_pr_load_ptr(&table->map); return CK_HT_TYPE_LOAD(&map->n_entries); } bool ck_ht_next(struct ck_ht *table, struct ck_ht_iterator *i, struct ck_ht_entry **entry) { struct ck_ht_map *map = table->map; uintptr_t key; if (i->offset >= map->capacity) return false; do { key = map->entries[i->offset].key; if (key != CK_HT_KEY_EMPTY && key != CK_HT_KEY_TOMBSTONE) break; } while (++i->offset < map->capacity); if (i->offset >= map->capacity) return false; *entry = map->entries + i->offset++; return true; } bool ck_ht_reset_size_spmc(struct ck_ht *table, CK_HT_TYPE size) { struct ck_ht_map *map, *update; map = table->map; update = ck_ht_map_create(table, size); if (update == NULL) return false; ck_pr_store_ptr_unsafe(&table->map, update); ck_ht_map_destroy(table->m, map, true); return true; } bool ck_ht_reset_spmc(struct ck_ht *table) { struct ck_ht_map *map = table->map; return ck_ht_reset_size_spmc(table, map->capacity); } bool ck_ht_grow_spmc(struct ck_ht *table, CK_HT_TYPE capacity) { struct ck_ht_map *map, *update; struct ck_ht_entry *bucket, *previous; struct ck_ht_hash h; size_t k, i, j, offset; CK_HT_TYPE probes; restart: map = table->map; if (map->capacity >= capacity) return false; update = ck_ht_map_create(table, capacity); if (update == NULL) return false; for (k = 0; k < map->capacity; k++) { previous = &map->entries[k]; if (previous->key == CK_HT_KEY_EMPTY || previous->key == CK_HT_KEY_TOMBSTONE) continue; if (table->mode & CK_HT_MODE_BYTESTRING) { #ifdef CK_HT_PP void *key; uint16_t key_length; key = ck_ht_entry_key(previous); key_length = ck_ht_entry_key_length(previous); #endif #ifndef 
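For illustration only, a minimal traversal over ck_ht_next() shown earlier in this hunk; as with ck_hs, iteration observes each slot once and is not linearizable with respect to concurrent writers. CK_HT_ITERATOR_INITIALIZER and ck_ht_entry_value() are taken from ck_ht.h, and visit() is a hypothetical application hook.

#include <ck_ht.h>

static void visit(void *value);		/* hypothetical */

static unsigned long
walk_table(struct ck_ht *ht)
{
	ck_ht_iterator_t it = CK_HT_ITERATOR_INITIALIZER;
	ck_ht_entry_t *cursor;
	unsigned long n = 0;

	while (ck_ht_next(ht, &it, &cursor) == true) {
		visit(ck_ht_entry_value(cursor));
		n++;
	}

	return n;
}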
CK_HT_PP h.value = previous->hash; #else table->h(&h, key, key_length, table->seed); #endif } else { #ifndef CK_HT_PP h.value = previous->hash; #else table->h(&h, &previous->key, sizeof(previous->key), table->seed); #endif } offset = h.value & update->mask; probes = 0; for (i = 0; i < update->probe_limit; i++) { bucket = (void *)((uintptr_t)(update->entries + offset) & ~(CK_MD_CACHELINE - 1)); for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) { struct ck_ht_entry *cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); probes++; if (CK_CC_LIKELY(cursor->key == CK_HT_KEY_EMPTY)) { *cursor = *previous; update->n_entries++; ck_ht_map_bound_set(update, h, probes); break; } } if (j < CK_HT_BUCKET_LENGTH) break; offset = ck_ht_map_probe_next(update, offset, h, probes); } if (i == update->probe_limit) { /* * We have hit the probe limit, the map needs to be even * larger. */ ck_ht_map_destroy(table->m, update, false); capacity <<= 1; goto restart; } } ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&table->map, update); ck_ht_map_destroy(table->m, map, true); return true; } bool ck_ht_remove_spmc(struct ck_ht *table, ck_ht_hash_t h, ck_ht_entry_t *entry) { struct ck_ht_map *map; struct ck_ht_entry *candidate, snapshot; map = table->map; if (table->mode & CK_HT_MODE_BYTESTRING) { candidate = ck_ht_map_probe_rd(map, h, &snapshot, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry)); } else { candidate = ck_ht_map_probe_rd(map, h, &snapshot, (void *)entry->key, sizeof(entry->key)); } /* No matching entry was found. */ if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY) return false; *entry = snapshot; ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE); ck_pr_fence_store(); CK_HT_TYPE_STORE(&map->n_entries, map->n_entries - 1); return true; } bool ck_ht_get_spmc(struct ck_ht *table, ck_ht_hash_t h, ck_ht_entry_t *entry) { struct ck_ht_entry *candidate, snapshot; struct ck_ht_map *map; CK_HT_TYPE d, d_prime; restart: map = ck_pr_load_ptr(&table->map); /* * Platforms that cannot read key and key_length atomically must reprobe * on the scan of any single entry. */ d = CK_HT_TYPE_LOAD(&map->deletions); if (table->mode & CK_HT_MODE_BYTESTRING) { candidate = ck_ht_map_probe_rd(map, h, &snapshot, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry)); } else { candidate = ck_ht_map_probe_rd(map, h, &snapshot, (void *)entry->key, sizeof(entry->key)); } d_prime = CK_HT_TYPE_LOAD(&map->deletions); if (d != d_prime) { /* * It is possible we have read (K, V'). Only valid states are * (K, V), (K', V') and (T, V). Restart load operation in face * of concurrent deletions or replacements. 
*/ goto restart; } if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY) return false; *entry = snapshot; return true; } bool ck_ht_set_spmc(struct ck_ht *table, ck_ht_hash_t h, ck_ht_entry_t *entry) { struct ck_ht_entry snapshot, *candidate, *priority; struct ck_ht_map *map; CK_HT_TYPE probes, probes_wr; bool empty = false; for (;;) { map = table->map; if (table->mode & CK_HT_MODE_BYTESTRING) { candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), &probes, &probes_wr); } else { candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority, (void *)entry->key, sizeof(entry->key), &probes, &probes_wr); } if (priority != NULL) { probes = probes_wr; break; } if (candidate != NULL) break; if (ck_ht_grow_spmc(table, map->capacity << 1) == false) return false; } if (candidate == NULL) { candidate = priority; empty = true; } if (candidate->key != CK_HT_KEY_EMPTY && priority != NULL && candidate != priority) { /* * Entry is moved into another position in probe sequence. * We avoid a state of (K, B) (where [K, B] -> [K', B]) by * guaranteeing a forced reprobe before transitioning from K to * T. (K, B) implies (K, B, D') so we will reprobe successfully * from this transient state. */ probes = probes_wr; #ifndef CK_HT_PP CK_HT_TYPE_STORE(&priority->key_length, entry->key_length); CK_HT_TYPE_STORE(&priority->hash, entry->hash); #endif /* * Readers must observe version counter change before they * observe re-use. If they observe re-use, it is at most * a tombstone. */ if (priority->value == CK_HT_KEY_TOMBSTONE) { CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); } ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key); ck_pr_fence_store(); /* * Make sure that readers who observe the tombstone would * also observe counter change. */ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE); ck_pr_fence_store(); } else { /* * In this case we are inserting a new entry or replacing * an existing entry. Yes, this can be combined into above branch, * but isn't because you are actually looking at dying code * (ck_ht is effectively deprecated and is being replaced soon). */ bool replace = candidate->key != CK_HT_KEY_EMPTY && candidate->key != CK_HT_KEY_TOMBSTONE; if (priority != NULL) { if (priority->key == CK_HT_KEY_TOMBSTONE) { CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); } candidate = priority; probes = probes_wr; } #ifdef CK_HT_PP ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); #else CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length); CK_HT_TYPE_STORE(&candidate->hash, entry->hash); ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); #endif /* * If we are insert a new entry then increment number * of entries associated with map. */ if (replace == false) CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1); } ck_ht_map_bound_set(map, h, probes); /* Enforce a load factor of 0.5. 
*/ if (map->n_entries * 2 > map->capacity) ck_ht_grow_spmc(table, map->capacity << 1); if (empty == true) { entry->key = CK_HT_KEY_EMPTY; } else { *entry = snapshot; } return true; } bool ck_ht_put_spmc(struct ck_ht *table, ck_ht_hash_t h, ck_ht_entry_t *entry) { struct ck_ht_entry snapshot, *candidate, *priority; struct ck_ht_map *map; CK_HT_TYPE probes, probes_wr; for (;;) { map = table->map; if (table->mode & CK_HT_MODE_BYTESTRING) { candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), &probes, &probes_wr); } else { candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority, (void *)entry->key, sizeof(entry->key), &probes, &probes_wr); } if (candidate != NULL || priority != NULL) break; if (ck_ht_grow_spmc(table, map->capacity << 1) == false) return false; } if (priority != NULL) { /* Version counter is updated before re-use. */ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); ck_pr_fence_store(); /* Re-use tombstone if one was found. */ candidate = priority; probes = probes_wr; } else if (candidate->key != CK_HT_KEY_EMPTY && candidate->key != CK_HT_KEY_TOMBSTONE) { /* * If the snapshot key is non-empty and the value field is not * a tombstone then an identical key was found. As store does * not implement replacement, we will fail. */ return false; } ck_ht_map_bound_set(map, h, probes); #ifdef CK_HT_PP ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); #else CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length); CK_HT_TYPE_STORE(&candidate->hash, entry->hash); ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); ck_pr_fence_store(); ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); #endif CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1); /* Enforce a load factor of 0.5. */ if (map->n_entries * 2 > map->capacity) ck_ht_grow_spmc(table, map->capacity << 1); return true; } void ck_ht_destroy(struct ck_ht *table) { ck_ht_map_destroy(table->m, table->map, false); return; } Index: head/sys/contrib/ck/src/ck_ht_hash.h =================================================================== --- head/sys/contrib/ck/src/ck_ht_hash.h (revision 331897) +++ head/sys/contrib/ck/src/ck_ht_hash.h (revision 331898) @@ -1,269 +1,287 @@ /* * Copyright 2012-2015 Samy Al Bahra * Copyright 2011-2014 AppNexus, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef CK_HT_HASH_H #define CK_HT_HASH_H /* * This is the Murmur hash written by Austin Appleby. */ #include #include //----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. // Note - The x86 and x64 versions do _not_ produce the same results, as the // algorithms are optimized for their respective platforms. You can still // compile and run any of them on any platform, but your performance with the // non-native version will be less than optimal. //----------------------------------------------------------------------------- // Platform-specific functions and macros // Microsoft Visual Studio #if defined(_MSC_VER) #define FORCE_INLINE __forceinline #include #define ROTL32(x,y) _rotl(x,y) #define ROTL64(x,y) _rotl64(x,y) #define BIG_CONSTANT(x) (x) // Other compilers #else // defined(_MSC_VER) #define FORCE_INLINE inline __attribute__((always_inline)) static inline uint32_t rotl32 ( uint32_t x, int8_t r ) { return (x << r) | (x >> (32 - r)); } static inline uint64_t rotl64 ( uint64_t x, int8_t r ) { return (x << r) | (x >> (64 - r)); } #define ROTL32(x,y) rotl32(x,y) #define ROTL64(x,y) rotl64(x,y) #define BIG_CONSTANT(x) (x##LLU) #endif // !defined(_MSC_VER) //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i ) { +#ifdef __s390x__ + uint32_t res; + + __asm__ (" lrv %0,%1\n" + : "=r" (res) : "Q" (p[i]) : "cc", "mem"); + return res; +#else return p[i]; +#endif /* !__s390x__ */ } //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche FORCE_INLINE static uint32_t fmix ( uint32_t h ) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } //----------------------------------------------------------------------------- static inline void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, uint32_t * out ) { const uint8_t * data = (const uint8_t*)key; const int nblocks = len / 4; int i; uint32_t h1 = seed; uint32_t c1 = 0xcc9e2d51; uint32_t c2 = 0x1b873593; //---------- // body const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4); for(i = -nblocks; i; i++) { uint32_t k1 = getblock(blocks,i); k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; h1 = ROTL32(h1,13); h1 = h1*5+0xe6546b64; } //---------- // tail const uint8_t * tail = (const uint8_t*)(data + nblocks*4); uint32_t k1 = 0; switch(len & 3) { case 3: k1 ^= tail[2] << 16; + /* fall through */ case 2: k1 ^= tail[1] << 8; + /* fall through */ case 1: k1 ^= tail[0]; k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h1 = fmix(h1); *(uint32_t *)out = h1; } 
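The s390x special case added to getblock() above exists because MurmurHash3 consumes its input as little-endian 32-bit words; on a big-endian target the block load must byte-reverse, which the lrv instruction does in a single load. A portable sketch of the same idea, assuming GCC-style byte-order macros and builtins rather than the inline assembly used here:

#include <stdint.h>
#include <string.h>

static uint32_t
getblock_le(const uint32_t *p, int i)
{
	uint32_t w;

	memcpy(&w, &p[i], sizeof(w));	/* alignment-safe fetch */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	w = __builtin_bswap32(w);	/* reverse into little-endian order */
#endif
	return w;
}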
static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ) { const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995); const int r = 47; uint64_t h = seed ^ (len * m); const uint64_t * data = (const uint64_t *)key; const uint64_t * end = data + (len/8); while(data != end) { uint64_t k; if (!((uintptr_t)data & 0x7)) k = *data++; else { memcpy(&k, data, sizeof(k)); data++; } k *= m; k ^= k >> r; k *= m; h ^= k; h *= m; } const unsigned char * data2 = (const unsigned char*)data; switch(len & 7) { case 7: h ^= (uint64_t)(data2[6]) << 48; + /* fall through */ case 6: h ^= (uint64_t)(data2[5]) << 40; + /* fall through */ case 5: h ^= (uint64_t)(data2[4]) << 32; + /* fall through */ case 4: h ^= (uint64_t)(data2[3]) << 24; + /* fall through */ case 3: h ^= (uint64_t)(data2[2]) << 16; + /* fall through */ case 2: h ^= (uint64_t)(data2[1]) << 8; + /* fall through */ case 1: h ^= (uint64_t)(data2[0]); h *= m; }; h ^= h >> r; h *= m; h ^= h >> r; return h; } // 64-bit hash for 32-bit platforms static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ) { const uint32_t m = 0x5bd1e995; const int r = 24; uint32_t h1 = (uint32_t)(seed) ^ len; uint32_t h2 = (uint32_t)(seed >> 32); const uint32_t * data = (const uint32_t *)key; while(len >= 8) { uint32_t k1 = *data++; k1 *= m; k1 ^= k1 >> r; k1 *= m; h1 *= m; h1 ^= k1; len -= 4; uint32_t k2 = *data++; k2 *= m; k2 ^= k2 >> r; k2 *= m; h2 *= m; h2 ^= k2; len -= 4; } if(len >= 4) { uint32_t k1 = *data++; k1 *= m; k1 ^= k1 >> r; k1 *= m; h1 *= m; h1 ^= k1; len -= 4; } switch(len) { case 3: h2 ^= ((const unsigned char*)data)[2] << 16; + /* fall through */ case 2: h2 ^= ((const unsigned char*)data)[1] << 8; + /* fall through */ case 1: h2 ^= ((const unsigned char*)data)[0]; h2 *= m; }; h1 ^= h2 >> 18; h1 *= m; h2 ^= h1 >> 22; h2 *= m; h1 ^= h2 >> 17; h1 *= m; h2 ^= h1 >> 19; h2 *= m; uint64_t h = h1; h = (h << 32) | h2; return h; } #endif /* CK_HT_HASH_H */ Index: head/sys/contrib/ck/src/ck_internal.h =================================================================== --- head/sys/contrib/ck/src/ck_internal.h (revision 331897) +++ head/sys/contrib/ck/src/ck_internal.h (revision 331898) @@ -1,119 +1,82 @@ /* * Copyright 2011-2015 Samy Al Bahra. * Copyright 2011 David Joseph. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
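The fall-through comments added throughout these switch tails mark the intentional case fall-throughs in the comment form that GCC's -Wimplicit-fallthrough heuristics recognize; newer compilers also accept an attribute spelling. A small illustrative sketch of both forms, assuming GCC 7 or later for the attribute:

static unsigned int
tail_mix(const unsigned char *tail, int len)
{
	unsigned int k = 0;

	switch (len & 3) {
	case 3:
		k ^= (unsigned int)tail[2] << 16;
		/* fall through */
	case 2:
		k ^= (unsigned int)tail[1] << 8;
#if defined(__GNUC__) && __GNUC__ >= 7
		__attribute__((fallthrough));
#endif
	case 1:
		k ^= tail[0];
		break;
	}

	return k;
}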
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html */ #define CK_INTERNAL_LOG_0 (0xAAAAAAAA) #define CK_INTERNAL_LOG_1 (0xCCCCCCCC) #define CK_INTERNAL_LOG_2 (0xF0F0F0F0) #define CK_INTERNAL_LOG_3 (0xFF00FF00) #define CK_INTERNAL_LOG_4 (0xFFFF0000) CK_CC_INLINE static uint32_t ck_internal_log(uint32_t v) { uint32_t r = (v & CK_INTERNAL_LOG_0) != 0; r |= ((v & CK_INTERNAL_LOG_4) != 0) << 4; r |= ((v & CK_INTERNAL_LOG_3) != 0) << 3; r |= ((v & CK_INTERNAL_LOG_2) != 0) << 2; r |= ((v & CK_INTERNAL_LOG_1) != 0) << 1; return (r); } CK_CC_INLINE static uint32_t ck_internal_power_2(uint32_t v) { --v; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return (++v); } CK_CC_INLINE static unsigned long ck_internal_max(unsigned long x, unsigned long y) { return x ^ ((x ^ y) & -(x < y)); } CK_CC_INLINE static uint64_t ck_internal_max_64(uint64_t x, uint64_t y) { return x ^ ((x ^ y) & -(x < y)); } CK_CC_INLINE static uint32_t ck_internal_max_32(uint32_t x, uint32_t y) { return x ^ ((x ^ y) & -(x < y)); } - -CK_CC_INLINE static unsigned long -ck_internal_bsf(unsigned long v) -{ -#if defined(__GNUC__) - return __builtin_ffs(v); -#else - unsigned int i; - const unsigned int s = sizeof(unsigned long) * 8 - 1; - - for (i = 0; i < s; i++) { - if (v & (1UL << (s - i))) - return sizeof(unsigned long) * 8 - i; - } - - return 1; -#endif /* !__GNUC__ */ -} - -CK_CC_INLINE static uint64_t -ck_internal_bsf_64(uint64_t v) -{ -#if defined(__GNUC__) - return __builtin_ffs(v); -#else - unsigned int i; - const unsigned int s = sizeof(unsigned long) * 8 - 1; - - for (i = 0; i < s; i++) { - if (v & (1ULL << (63U - i))) - return i; - } -#endif /* !__GNUC__ */ - - return 1; -} - Index: head/sys/contrib/ck/src/ck_rhs.c =================================================================== --- head/sys/contrib/ck/src/ck_rhs.c (revision 331897) +++ head/sys/contrib/ck/src/ck_rhs.c (revision 331898) @@ -1,1480 +1,1480 @@ /* * Copyright 2014-2015 Olivier Houchard. * Copyright 2012-2015 Samy Al Bahra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
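The bit-scan helpers removed from ck_internal.h above are superseded by the ck_cc_ffs*() family: the removed ck_internal_bsf_64() routed a 64-bit value through __builtin_ffs(), which takes an int and can silently truncate the argument, whereas ck_cc_ffsll() operates at full width. The helpers that remain behave as in this quick sketch (ck_internal.h is a private header, so the ck source include path and ck_cc.h are assumed):

#include <assert.h>
#include <stdint.h>

#include <ck_cc.h>
#include "ck_internal.h"	/* private header from src/ */

static void
internal_helpers(void)
{

	/* Round up to the next power of two; exact powers map to themselves. */
	assert(ck_internal_power_2(1000) == 1024);
	assert(ck_internal_power_2(1024) == 1024);

	/* Branch-free maximum. */
	assert(ck_internal_max_64(3, 9) == 9);
	return;
}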
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include "ck_internal.h" #ifndef CK_RHS_PROBE_L1_SHIFT #define CK_RHS_PROBE_L1_SHIFT 3ULL #endif /* CK_RHS_PROBE_L1_SHIFT */ #define CK_RHS_PROBE_L1 (1 << CK_RHS_PROBE_L1_SHIFT) #define CK_RHS_PROBE_L1_MASK (CK_RHS_PROBE_L1 - 1) #ifndef CK_RHS_PROBE_L1_DEFAULT #define CK_RHS_PROBE_L1_DEFAULT CK_MD_CACHELINE #endif #define CK_RHS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1)) #define CK_RHS_VMA(x) \ ((void *)((uintptr_t)(x) & CK_RHS_VMA_MASK)) #define CK_RHS_EMPTY NULL #define CK_RHS_G (1024) #define CK_RHS_G_MASK (CK_RHS_G - 1) #if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8) #define CK_RHS_WORD uint8_t #define CK_RHS_WORD_MAX UINT8_MAX #define CK_RHS_STORE(x, y) ck_pr_store_8(x, y) #define CK_RHS_LOAD(x) ck_pr_load_8(x) #elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16) #define CK_RHS_WORD uint16_t #define CK_RHS_WORD_MAX UINT16_MAX #define CK_RHS_STORE(x, y) ck_pr_store_16(x, y) #define CK_RHS_LOAD(x) ck_pr_load_16(x) #elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32) #define CK_RHS_WORD uint32_t #define CK_RHS_WORD_MAX UINT32_MAX #define CK_RHS_STORE(x, y) ck_pr_store_32(x, y) #define CK_RHS_LOAD(x) ck_pr_load_32(x) #else #error "ck_rhs is not supported on your platform." #endif #define CK_RHS_MAX_WANTED 0xffff enum ck_rhs_probe_behavior { CK_RHS_PROBE = 0, /* Default behavior. */ CK_RHS_PROBE_RH, /* Short-circuit if RH slot found. */ CK_RHS_PROBE_INSERT, /* Short-circuit on probe bound if tombstone found. 
*/ CK_RHS_PROBE_ROBIN_HOOD,/* Look for the first slot available for the entry we are about to replace, only used to internally implement Robin Hood */ CK_RHS_PROBE_NO_RH, /* Don't do the RH dance */ }; struct ck_rhs_entry_desc { unsigned int probes; unsigned short wanted; CK_RHS_WORD probe_bound; bool in_rh; const void *entry; } CK_CC_ALIGN(16); struct ck_rhs_no_entry_desc { unsigned int probes; unsigned short wanted; CK_RHS_WORD probe_bound; bool in_rh; } CK_CC_ALIGN(8); typedef long ck_rhs_probe_cb_t(struct ck_rhs *hs, struct ck_rhs_map *map, unsigned long *n_probes, long *priority, unsigned long h, const void *key, const void **object, unsigned long probe_limit, enum ck_rhs_probe_behavior behavior); struct ck_rhs_map { unsigned int generation[CK_RHS_G]; unsigned int probe_maximum; unsigned long mask; unsigned long step; unsigned int probe_limit; unsigned long n_entries; unsigned long capacity; unsigned long size; unsigned long max_entries; char offset_mask; union { struct ck_rhs_entry_desc *descs; struct ck_rhs_no_entry { const void **entries; struct ck_rhs_no_entry_desc *descs; } no_entries; } entries; bool read_mostly; ck_rhs_probe_cb_t *probe_func; }; static CK_CC_INLINE const void * ck_rhs_entry(struct ck_rhs_map *map, long offset) { if (map->read_mostly) return (map->entries.no_entries.entries[offset]); else return (map->entries.descs[offset].entry); } static CK_CC_INLINE const void ** ck_rhs_entry_addr(struct ck_rhs_map *map, long offset) { if (map->read_mostly) return (&map->entries.no_entries.entries[offset]); else return (&map->entries.descs[offset].entry); } static CK_CC_INLINE struct ck_rhs_entry_desc * ck_rhs_desc(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) return ((struct ck_rhs_entry_desc *)(void *)&map->entries.no_entries.descs[offset]); else return (&map->entries.descs[offset]); } static CK_CC_INLINE void ck_rhs_wanted_inc(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) map->entries.no_entries.descs[offset].wanted++; else map->entries.descs[offset].wanted++; } static CK_CC_INLINE unsigned int ck_rhs_probes(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) return (map->entries.no_entries.descs[offset].probes); else return (map->entries.descs[offset].probes); } static CK_CC_INLINE void ck_rhs_set_probes(struct ck_rhs_map *map, long offset, unsigned int value) { if (CK_CC_UNLIKELY(map->read_mostly)) map->entries.no_entries.descs[offset].probes = value; else map->entries.descs[offset].probes = value; } static CK_CC_INLINE CK_RHS_WORD ck_rhs_probe_bound(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) return (map->entries.no_entries.descs[offset].probe_bound); else return (map->entries.descs[offset].probe_bound); } static CK_CC_INLINE CK_RHS_WORD * ck_rhs_probe_bound_addr(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) return (&map->entries.no_entries.descs[offset].probe_bound); else return (&map->entries.descs[offset].probe_bound); } static CK_CC_INLINE bool ck_rhs_in_rh(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) return (map->entries.no_entries.descs[offset].in_rh); else return (map->entries.descs[offset].in_rh); } static CK_CC_INLINE void ck_rhs_set_rh(struct ck_rhs_map *map, long offset) { if (CK_CC_UNLIKELY(map->read_mostly)) map->entries.no_entries.descs[offset].in_rh = true; else map->entries.descs[offset].in_rh = true; } static CK_CC_INLINE void ck_rhs_unset_rh(struct ck_rhs_map *map, long 
offset) { if (CK_CC_UNLIKELY(map->read_mostly)) map->entries.no_entries.descs[offset].in_rh = false; else map->entries.descs[offset].in_rh = false; } #define CK_RHS_DEFAULT_LOAD_FACTOR 50 static ck_rhs_probe_cb_t ck_rhs_map_probe; static ck_rhs_probe_cb_t ck_rhs_map_probe_rm; bool ck_rhs_set_load_factor(struct ck_rhs *hs, unsigned int load_factor) { struct ck_rhs_map *map = hs->map; if (load_factor == 0 || load_factor > 100) return false; hs->load_factor = load_factor; map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100; while (map->n_entries > map->max_entries) { if (ck_rhs_grow(hs, map->capacity << 1) == false) return false; map = hs->map; } return true; } void ck_rhs_iterator_init(struct ck_rhs_iterator *iterator) { iterator->cursor = NULL; iterator->offset = 0; return; } bool ck_rhs_next(struct ck_rhs *hs, struct ck_rhs_iterator *i, void **key) { struct ck_rhs_map *map = hs->map; void *value; if (i->offset >= map->capacity) return false; do { value = CK_CC_DECONST_PTR(ck_rhs_entry(map, i->offset)); if (value != CK_RHS_EMPTY) { #ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) value = CK_RHS_VMA(value); #endif i->offset++; *key = value; return true; } } while (++i->offset < map->capacity); return false; } void ck_rhs_stat(struct ck_rhs *hs, struct ck_rhs_stat *st) { struct ck_rhs_map *map = hs->map; st->n_entries = map->n_entries; st->probe_maximum = map->probe_maximum; return; } unsigned long ck_rhs_count(struct ck_rhs *hs) { return hs->map->n_entries; } static void ck_rhs_map_destroy(struct ck_malloc *m, struct ck_rhs_map *map, bool defer) { m->free(map, map->size, defer); return; } void ck_rhs_destroy(struct ck_rhs *hs) { ck_rhs_map_destroy(hs->m, hs->map, false); return; } static struct ck_rhs_map * ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries) { struct ck_rhs_map *map; unsigned long size, n_entries, limit; n_entries = ck_internal_power_2(entries); if (n_entries < CK_RHS_PROBE_L1) n_entries = CK_RHS_PROBE_L1; if (hs->mode & CK_RHS_MODE_READ_MOSTLY) size = sizeof(struct ck_rhs_map) + (sizeof(void *) * n_entries + sizeof(struct ck_rhs_no_entry_desc) * n_entries + 2 * CK_MD_CACHELINE - 1); else size = sizeof(struct ck_rhs_map) + (sizeof(struct ck_rhs_entry_desc) * n_entries + CK_MD_CACHELINE - 1); map = hs->m->malloc(size); if (map == NULL) return NULL; map->read_mostly = !!(hs->mode & CK_RHS_MODE_READ_MOSTLY); map->size = size; /* We should probably use a more intelligent heuristic for default probe length. */ limit = ck_internal_max(n_entries >> (CK_RHS_PROBE_L1_SHIFT + 2), CK_RHS_PROBE_L1_DEFAULT); if (limit > UINT_MAX) limit = UINT_MAX; map->probe_limit = (unsigned int)limit; map->probe_maximum = 0; map->capacity = n_entries; - map->step = ck_internal_bsf(n_entries); + map->step = ck_cc_ffsl(n_entries); map->mask = n_entries - 1; map->n_entries = 0; map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100; /* Align map allocation to cache line. 
*/ if (map->read_mostly) { map->entries.no_entries.entries = (void *)(((uintptr_t)&map[1] + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1)); map->entries.no_entries.descs = (void *)(((uintptr_t)map->entries.no_entries.entries + (sizeof(void *) * n_entries) + CK_MD_CACHELINE - 1) &~ (CK_MD_CACHELINE - 1)); memset(map->entries.no_entries.entries, 0, sizeof(void *) * n_entries); memset(map->entries.no_entries.descs, 0, sizeof(struct ck_rhs_no_entry_desc) * n_entries); map->offset_mask = (CK_MD_CACHELINE / sizeof(void *)) - 1; map->probe_func = ck_rhs_map_probe_rm; } else { map->entries.descs = (void *)(((uintptr_t)&map[1] + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1)); memset(map->entries.descs, 0, sizeof(struct ck_rhs_entry_desc) * n_entries); map->offset_mask = (CK_MD_CACHELINE / sizeof(struct ck_rhs_entry_desc)) - 1; map->probe_func = ck_rhs_map_probe; } memset(map->generation, 0, sizeof map->generation); /* Commit entries purge with respect to map publication. */ ck_pr_fence_store(); return map; } bool ck_rhs_reset_size(struct ck_rhs *hs, unsigned long capacity) { struct ck_rhs_map *map, *previous; previous = hs->map; map = ck_rhs_map_create(hs, capacity); if (map == NULL) return false; ck_pr_store_ptr(&hs->map, map); ck_rhs_map_destroy(hs->m, previous, true); return true; } bool ck_rhs_reset(struct ck_rhs *hs) { struct ck_rhs_map *previous; previous = hs->map; return ck_rhs_reset_size(hs, previous->capacity); } static inline unsigned long ck_rhs_map_probe_next(struct ck_rhs_map *map, unsigned long offset, unsigned long probes) { if (probes & map->offset_mask) { offset = (offset &~ map->offset_mask) + ((offset + 1) & map->offset_mask); return offset; } else return (offset + probes) & map->mask; } static inline unsigned long ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset, unsigned long probes) { if (probes & map->offset_mask) { offset = (offset &~ map->offset_mask) + ((offset - 1) & map->offset_mask); return offset; } else return ((offset - probes) & map->mask); } static inline void ck_rhs_map_bound_set(struct ck_rhs_map *m, unsigned long h, unsigned long n_probes) { unsigned long offset = h & m->mask; struct ck_rhs_entry_desc *desc; if (n_probes > m->probe_maximum) ck_pr_store_uint(&m->probe_maximum, n_probes); if (!(m->read_mostly)) { desc = &m->entries.descs[offset]; if (desc->probe_bound < n_probes) { if (n_probes > CK_RHS_WORD_MAX) n_probes = CK_RHS_WORD_MAX; CK_RHS_STORE(&desc->probe_bound, n_probes); ck_pr_fence_store(); } } return; } static inline unsigned int ck_rhs_map_bound_get(struct ck_rhs_map *m, unsigned long h) { unsigned long offset = h & m->mask; unsigned int r = CK_RHS_WORD_MAX; if (m->read_mostly) r = ck_pr_load_uint(&m->probe_maximum); else { r = CK_RHS_LOAD(&m->entries.descs[offset].probe_bound); if (r == CK_RHS_WORD_MAX) r = ck_pr_load_uint(&m->probe_maximum); } return r; } bool ck_rhs_grow(struct ck_rhs *hs, unsigned long capacity) { struct ck_rhs_map *map, *update; const void *previous, *prev_saved; unsigned long k, offset, probes; restart: map = hs->map; if (map->capacity > capacity) return false; update = ck_rhs_map_create(hs, capacity); if (update == NULL) return false; for (k = 0; k < map->capacity; k++) { unsigned long h; prev_saved = previous = ck_rhs_entry(map, k); if (previous == CK_RHS_EMPTY) continue; #ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) previous = CK_RHS_VMA(previous); #endif h = hs->hf(previous, hs->seed); offset = h & update->mask; probes = 0; for (;;) { const void **cursor = ck_rhs_entry_addr(update, offset); if 
(probes++ == update->probe_limit) { /* * We have hit the probe limit, map needs to be even larger. */ ck_rhs_map_destroy(hs->m, update, false); capacity <<= 1; goto restart; } if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) { *cursor = prev_saved; update->n_entries++; ck_rhs_set_probes(update, offset, probes); ck_rhs_map_bound_set(update, h, probes); break; } else if (ck_rhs_probes(update, offset) < probes) { const void *tmp = prev_saved; unsigned int old_probes; prev_saved = previous = *cursor; #ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) previous = CK_RHS_VMA(previous); #endif *cursor = tmp; ck_rhs_map_bound_set(update, h, probes); h = hs->hf(previous, hs->seed); old_probes = ck_rhs_probes(update, offset); ck_rhs_set_probes(update, offset, probes); probes = old_probes - 1; continue; } ck_rhs_wanted_inc(update, offset); offset = ck_rhs_map_probe_next(update, offset, probes); } } ck_pr_fence_store(); ck_pr_store_ptr(&hs->map, update); ck_rhs_map_destroy(hs->m, map, true); return true; } bool ck_rhs_rebuild(struct ck_rhs *hs) { return ck_rhs_grow(hs, hs->map->capacity); } static long ck_rhs_map_probe_rm(struct ck_rhs *hs, struct ck_rhs_map *map, unsigned long *n_probes, long *priority, unsigned long h, const void *key, const void **object, unsigned long probe_limit, enum ck_rhs_probe_behavior behavior) { const void *k; const void *compare; long pr = -1; unsigned long offset, probes, opl; #ifdef CK_RHS_PP /* If we are storing object pointers, then we may leverage pointer packing. */ unsigned long hv = 0; if (hs->mode & CK_RHS_MODE_OBJECT) { hv = (h >> 25) & CK_RHS_KEY_MASK; compare = CK_RHS_VMA(key); } else { compare = key; } #else compare = key; #endif *object = NULL; if (behavior != CK_RHS_PROBE_ROBIN_HOOD) { probes = 0; offset = h & map->mask; } else { /* Restart from the bucket we were previously in */ probes = *n_probes; offset = ck_rhs_map_probe_next(map, *priority, probes); } opl = probe_limit; for (;;) { if (probes++ == probe_limit) { if (probe_limit == opl || pr != -1) { k = CK_RHS_EMPTY; goto leave; } /* * If no eligible slot has been found yet, continue probe * sequence with original probe limit. 
*/ probe_limit = opl; } k = ck_pr_load_ptr(&map->entries.no_entries.entries[offset]); if (k == CK_RHS_EMPTY) goto leave; if (behavior != CK_RHS_PROBE_NO_RH) { struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset]; if (pr == -1 && desc->in_rh == false && desc->probes < probes) { pr = offset; *n_probes = probes; if (behavior == CK_RHS_PROBE_RH || behavior == CK_RHS_PROBE_ROBIN_HOOD) { k = CK_RHS_EMPTY; goto leave; } } } if (behavior != CK_RHS_PROBE_ROBIN_HOOD) { #ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) { if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) { offset = ck_rhs_map_probe_next(map, offset, probes); continue; } k = CK_RHS_VMA(k); } #endif if (k == compare) goto leave; if (hs->compare == NULL) { offset = ck_rhs_map_probe_next(map, offset, probes); continue; } if (hs->compare(k, key) == true) goto leave; } offset = ck_rhs_map_probe_next(map, offset, probes); } leave: if (probes > probe_limit) { offset = -1; } else { *object = k; } if (pr == -1) *n_probes = probes; *priority = pr; return offset; } static long ck_rhs_map_probe(struct ck_rhs *hs, struct ck_rhs_map *map, unsigned long *n_probes, long *priority, unsigned long h, const void *key, const void **object, unsigned long probe_limit, enum ck_rhs_probe_behavior behavior) { const void *k; const void *compare; long pr = -1; unsigned long offset, probes, opl; #ifdef CK_RHS_PP /* If we are storing object pointers, then we may leverage pointer packing. */ unsigned long hv = 0; if (hs->mode & CK_RHS_MODE_OBJECT) { hv = (h >> 25) & CK_RHS_KEY_MASK; compare = CK_RHS_VMA(key); } else { compare = key; } #else compare = key; #endif *object = NULL; if (behavior != CK_RHS_PROBE_ROBIN_HOOD) { probes = 0; offset = h & map->mask; } else { /* Restart from the bucket we were previously in */ probes = *n_probes; offset = ck_rhs_map_probe_next(map, *priority, probes); } opl = probe_limit; if (behavior == CK_RHS_PROBE_INSERT) probe_limit = ck_rhs_map_bound_get(map, h); for (;;) { if (probes++ == probe_limit) { if (probe_limit == opl || pr != -1) { k = CK_RHS_EMPTY; goto leave; } /* * If no eligible slot has been found yet, continue probe * sequence with original probe limit. 
*/ probe_limit = opl; } k = ck_pr_load_ptr(&map->entries.descs[offset].entry); if (k == CK_RHS_EMPTY) goto leave; if ((behavior != CK_RHS_PROBE_NO_RH)) { struct ck_rhs_entry_desc *desc = &map->entries.descs[offset]; if (pr == -1 && desc->in_rh == false && desc->probes < probes) { pr = offset; *n_probes = probes; if (behavior == CK_RHS_PROBE_RH || behavior == CK_RHS_PROBE_ROBIN_HOOD) { k = CK_RHS_EMPTY; goto leave; } } } if (behavior != CK_RHS_PROBE_ROBIN_HOOD) { #ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) { if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) { offset = ck_rhs_map_probe_next(map, offset, probes); continue; } k = CK_RHS_VMA(k); } #endif if (k == compare) goto leave; if (hs->compare == NULL) { offset = ck_rhs_map_probe_next(map, offset, probes); continue; } if (hs->compare(k, key) == true) goto leave; } offset = ck_rhs_map_probe_next(map, offset, probes); } leave: if (probes > probe_limit) { offset = -1; } else { *object = k; } if (pr == -1) *n_probes = probes; *priority = pr; return offset; } static inline const void * ck_rhs_marshal(unsigned int mode, const void *key, unsigned long h) { #ifdef CK_RHS_PP const void *insert; if (mode & CK_RHS_MODE_OBJECT) { insert = (void *)((uintptr_t)CK_RHS_VMA(key) | ((h >> 25) << CK_MD_VMA_BITS)); } else { insert = key; } return insert; #else (void)mode; (void)h; return key; #endif } bool ck_rhs_gc(struct ck_rhs *hs) { unsigned long i; struct ck_rhs_map *map = hs->map; unsigned int max_probes = 0; for (i = 0; i < map->capacity; i++) { if (ck_rhs_probes(map, i) > max_probes) max_probes = ck_rhs_probes(map, i); } map->probe_maximum = max_probes; return true; } static void ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot, unsigned long h) { struct ck_rhs_map *map = hs->map; long offset; unsigned int probes = 1; bool found_slot = false; struct ck_rhs_entry_desc *desc; offset = h & map->mask; if (old_slot == -1) found_slot = true; while (offset != end_offset) { if (offset == old_slot) found_slot = true; if (found_slot) { desc = ck_rhs_desc(map, offset); if (desc->wanted < CK_RHS_MAX_WANTED) desc->wanted++; } offset = ck_rhs_map_probe_next(map, offset, probes); probes++; } } static unsigned long ck_rhs_remove_wanted(struct ck_rhs *hs, long offset, long limit) { struct ck_rhs_map *map = hs->map; int probes = ck_rhs_probes(map, offset); bool do_remove = true; struct ck_rhs_entry_desc *desc; while (probes > 1) { probes--; offset = ck_rhs_map_probe_prev(map, offset, probes); if (offset == limit) do_remove = false; if (do_remove) { desc = ck_rhs_desc(map, offset); if (desc->wanted != CK_RHS_MAX_WANTED) desc->wanted--; } } return offset; } static long ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned int probes) { while (probes > (unsigned long)map->offset_mask + 1) { offset -= ((probes - 1) &~ map->offset_mask); offset &= map->mask; offset = (offset &~ map->offset_mask) + ((offset - map->offset_mask) & map->offset_mask); probes -= map->offset_mask + 1; } return ((offset &~ map->offset_mask) + ((offset - (probes - 1)) & map->offset_mask)); } #define CK_RHS_MAX_RH 512 static int ck_rhs_put_robin_hood(struct ck_rhs *hs, long orig_slot, struct ck_rhs_entry_desc *desc) { long slot, first; const void *object, *insert; unsigned long n_probes; struct ck_rhs_map *map; unsigned long h = 0; long prev; void *key; long prevs[CK_RHS_MAX_RH]; unsigned int prevs_nb = 0; unsigned int i; map = hs->map; first = orig_slot; n_probes = desc->probes; restart: key = CK_CC_DECONST_PTR(ck_rhs_entry(map, first)); insert = key; 
#ifdef CK_RHS_PP if (hs->mode & CK_RHS_MODE_OBJECT) key = CK_RHS_VMA(key); #endif orig_slot = first; ck_rhs_set_rh(map, orig_slot); slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, prevs_nb == CK_RHS_MAX_RH ? CK_RHS_PROBE_NO_RH : CK_RHS_PROBE_ROBIN_HOOD); if (slot == -1 && first == -1) { if (ck_rhs_grow(hs, map->capacity << 1) == false) { desc->in_rh = false; for (i = 0; i < prevs_nb; i++) ck_rhs_unset_rh(map, prevs[i]); return -1; } return 1; } if (first != -1) { desc = ck_rhs_desc(map, first); int old_probes = desc->probes; desc->probes = n_probes; h = ck_rhs_get_first_offset(map, first, n_probes); ck_rhs_map_bound_set(map, h, n_probes); prev = orig_slot; prevs[prevs_nb++] = prev; n_probes = old_probes; goto restart; } else { /* An empty slot was found. */ h = ck_rhs_get_first_offset(map, slot, n_probes); ck_rhs_map_bound_set(map, h, n_probes); ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert); ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); ck_rhs_set_probes(map, slot, n_probes); desc->in_rh = 0; ck_rhs_add_wanted(hs, slot, orig_slot, h); } while (prevs_nb > 0) { prev = prevs[--prevs_nb]; ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot), ck_rhs_entry(map, prev)); h = ck_rhs_get_first_offset(map, orig_slot, desc->probes); ck_rhs_add_wanted(hs, orig_slot, prev, h); ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); orig_slot = prev; desc->in_rh = false; desc = ck_rhs_desc(map, orig_slot); } return 0; } static void ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot) { struct ck_rhs_map *map = hs->map; struct ck_rhs_entry_desc *desc, *new_desc = NULL; unsigned long h; desc = ck_rhs_desc(map, slot); h = ck_rhs_remove_wanted(hs, slot, -1); while (desc->wanted > 0) { unsigned long offset = 0, tmp_offset; unsigned long wanted_probes = 1; unsigned int probe = 0; unsigned int max_probes; /* Find a successor */ while (wanted_probes < map->probe_maximum) { probe = wanted_probes; offset = ck_rhs_map_probe_next(map, slot, probe); while (probe < map->probe_maximum) { new_desc = ck_rhs_desc(map, offset); if (new_desc->probes == probe + 1) break; probe++; offset = ck_rhs_map_probe_next(map, offset, probe); } if (probe < map->probe_maximum) break; wanted_probes++; } if (!(wanted_probes < map->probe_maximum)) { desc->wanted = 0; break; } desc->probes = wanted_probes; h = ck_rhs_remove_wanted(hs, offset, slot); ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), ck_rhs_entry(map, offset)); ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); if (wanted_probes < CK_RHS_WORD_MAX) { struct ck_rhs_entry_desc *hdesc = ck_rhs_desc(map, h); if (hdesc->wanted == 1) CK_RHS_STORE(&hdesc->probe_bound, wanted_probes); else if (hdesc->probe_bound == CK_RHS_WORD_MAX || hdesc->probe_bound == new_desc->probes) { probe++; if (hdesc->probe_bound == CK_RHS_WORD_MAX) max_probes = map->probe_maximum; else { max_probes = hdesc->probe_bound; max_probes--; } tmp_offset = ck_rhs_map_probe_next(map, offset, probe); while (probe < max_probes) { if (h == (unsigned long)ck_rhs_get_first_offset(map, tmp_offset, probe)) break; probe++; tmp_offset = ck_rhs_map_probe_next(map, tmp_offset, probe); } if (probe == max_probes) CK_RHS_STORE(&hdesc->probe_bound, wanted_probes); } } if (desc->wanted < CK_RHS_MAX_WANTED) desc->wanted--; slot = offset; desc = new_desc; } ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), CK_RHS_EMPTY); if ((desc->probes - 1) < CK_RHS_WORD_MAX) CK_RHS_STORE(ck_rhs_probe_bound_addr(map, 
h), desc->probes - 1); desc->probes = 0; } bool ck_rhs_fas(struct ck_rhs *hs, unsigned long h, const void *key, void **previous) { long slot, first; const void *object; const void *insert; unsigned long n_probes; struct ck_rhs_map *map = hs->map; struct ck_rhs_entry_desc *desc, *desc2; *previous = NULL; restart: slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, ck_rhs_map_bound_get(map, h), CK_RHS_PROBE); /* Replacement semantics presume existence. */ if (object == NULL) return false; insert = ck_rhs_marshal(hs->mode, key, h); if (first != -1) { int ret; desc = ck_rhs_desc(map, slot); desc2 = ck_rhs_desc(map, first); desc->in_rh = true; ret = ck_rhs_put_robin_hood(hs, first, desc2); desc->in_rh = false; if (CK_CC_UNLIKELY(ret == 1)) goto restart; else if (CK_CC_UNLIKELY(ret != 0)) return false; ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert); ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); desc2->probes = n_probes; ck_rhs_add_wanted(hs, first, -1, h); ck_rhs_do_backward_shift_delete(hs, slot); } else { ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert); ck_rhs_set_probes(map, slot, n_probes); } *previous = CK_CC_DECONST_PTR(object); return true; } /* * An apply function takes two arguments. The first argument is a pointer to a * pre-existing object. The second argument is a pointer to the fifth argument * passed to ck_hs_apply. If a non-NULL pointer is passed to the first argument * and the return value of the apply function is NULL, then the pre-existing * value is deleted. If the return pointer is the same as the one passed to the * apply function then no changes are made to the hash table. If the first * argument is non-NULL and the return pointer is different than that passed to * the apply function, then the pre-existing value is replaced. For * replacement, it is required that the value itself is identical to the * previous value. */ bool ck_rhs_apply(struct ck_rhs *hs, unsigned long h, const void *key, ck_rhs_apply_fn_t *fn, void *cl) { const void *insert; const void *object, *delta = false; unsigned long n_probes; long slot, first; struct ck_rhs_map *map; bool delta_set = false; restart: map = hs->map; slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_RHS_PROBE_INSERT); if (slot == -1 && first == -1) { if (ck_rhs_grow(hs, map->capacity << 1) == false) return false; goto restart; } if (!delta_set) { delta = fn(CK_CC_DECONST_PTR(object), cl); delta_set = true; } if (delta == NULL) { /* * The apply function has requested deletion. If the object doesn't exist, * then exit early. */ if (CK_CC_UNLIKELY(object == NULL)) return true; /* Otherwise, delete it. */ ck_rhs_do_backward_shift_delete(hs, slot); return true; } /* The apply function has not requested hash set modification so exit early. */ if (delta == object) return true; /* A modification or insertion has been requested. */ ck_rhs_map_bound_set(map, h, n_probes); insert = ck_rhs_marshal(hs->mode, delta, h); if (first != -1) { /* * This follows the same semantics as ck_hs_set, please refer to that * function for documentation. */ struct ck_rhs_entry_desc *desc = NULL, *desc2; if (slot != -1) { desc = ck_rhs_desc(map, slot); desc->in_rh = true; } desc2 = ck_rhs_desc(map, first); int ret = ck_rhs_put_robin_hood(hs, first, desc2); if (slot != -1) desc->in_rh = false; if (CK_CC_UNLIKELY(ret == 1)) goto restart; if (CK_CC_UNLIKELY(ret == -1)) return false; /* If an earlier bucket was found, then store entry there. 
*/ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert); desc2->probes = n_probes; /* * If a duplicate key was found, then delete it after * signaling concurrent probes to restart. Optionally, * it is possible to install tombstone after grace * period if we can guarantee earlier position of * duplicate key. */ ck_rhs_add_wanted(hs, first, -1, h); if (object != NULL) { ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); ck_rhs_do_backward_shift_delete(hs, slot); } } else { /* * If we are storing into same slot, then atomic store is sufficient * for replacement. */ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert); ck_rhs_set_probes(map, slot, n_probes); if (object == NULL) ck_rhs_add_wanted(hs, slot, -1, h); } if (object == NULL) { map->n_entries++; if ((map->n_entries ) > map->max_entries) ck_rhs_grow(hs, map->capacity << 1); } return true; } bool ck_rhs_set(struct ck_rhs *hs, unsigned long h, const void *key, void **previous) { long slot, first; const void *object; const void *insert; unsigned long n_probes; struct ck_rhs_map *map; *previous = NULL; restart: map = hs->map; slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_RHS_PROBE_INSERT); if (slot == -1 && first == -1) { if (ck_rhs_grow(hs, map->capacity << 1) == false) return false; goto restart; } ck_rhs_map_bound_set(map, h, n_probes); insert = ck_rhs_marshal(hs->mode, key, h); if (first != -1) { struct ck_rhs_entry_desc *desc = NULL, *desc2; if (slot != -1) { desc = ck_rhs_desc(map, slot); desc->in_rh = true; } desc2 = ck_rhs_desc(map, first); int ret = ck_rhs_put_robin_hood(hs, first, desc2); if (slot != -1) desc->in_rh = false; if (CK_CC_UNLIKELY(ret == 1)) goto restart; if (CK_CC_UNLIKELY(ret == -1)) return false; /* If an earlier bucket was found, then store entry there. */ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert); desc2->probes = n_probes; /* * If a duplicate key was found, then delete it after * signaling concurrent probes to restart. Optionally, * it is possible to install tombstone after grace * period if we can guarantee earlier position of * duplicate key. */ ck_rhs_add_wanted(hs, first, -1, h); if (object != NULL) { ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]); ck_pr_fence_atomic_store(); ck_rhs_do_backward_shift_delete(hs, slot); } } else { /* * If we are storing into same slot, then atomic store is sufficient * for replacement. */ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert); ck_rhs_set_probes(map, slot, n_probes); if (object == NULL) ck_rhs_add_wanted(hs, slot, -1, h); } if (object == NULL) { map->n_entries++; if ((map->n_entries ) > map->max_entries) ck_rhs_grow(hs, map->capacity << 1); } *previous = CK_CC_DECONST_PTR(object); return true; } static bool ck_rhs_put_internal(struct ck_rhs *hs, unsigned long h, const void *key, enum ck_rhs_probe_behavior behavior) { long slot, first; const void *object; const void *insert; unsigned long n_probes; struct ck_rhs_map *map; restart: map = hs->map; slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, behavior); if (slot == -1 && first == -1) { if (ck_rhs_grow(hs, map->capacity << 1) == false) return false; goto restart; } /* Fail operation if a match was found. 
*/ if (object != NULL) return false; ck_rhs_map_bound_set(map, h, n_probes); insert = ck_rhs_marshal(hs->mode, key, h); if (first != -1) { struct ck_rhs_entry_desc *desc = ck_rhs_desc(map, first); int ret = ck_rhs_put_robin_hood(hs, first, desc); if (CK_CC_UNLIKELY(ret == 1)) return ck_rhs_put_internal(hs, h, key, behavior); else if (CK_CC_UNLIKELY(ret == -1)) return false; /* Insert key into first bucket in probe sequence. */ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert); desc->probes = n_probes; ck_rhs_add_wanted(hs, first, -1, h); } else { /* An empty slot was found. */ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert); ck_rhs_set_probes(map, slot, n_probes); ck_rhs_add_wanted(hs, slot, -1, h); } map->n_entries++; if ((map->n_entries ) > map->max_entries) ck_rhs_grow(hs, map->capacity << 1); return true; } bool ck_rhs_put(struct ck_rhs *hs, unsigned long h, const void *key) { return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_INSERT); } bool ck_rhs_put_unique(struct ck_rhs *hs, unsigned long h, const void *key) { return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_RH); } void * ck_rhs_get(struct ck_rhs *hs, unsigned long h, const void *key) { long first; const void *object; struct ck_rhs_map *map; unsigned long n_probes; unsigned int g, g_p, probe; unsigned int *generation; do { map = ck_pr_load_ptr(&hs->map); generation = &map->generation[h & CK_RHS_G_MASK]; g = ck_pr_load_uint(generation); probe = ck_rhs_map_bound_get(map, h); ck_pr_fence_load(); first = -1; map->probe_func(hs, map, &n_probes, &first, h, key, &object, probe, CK_RHS_PROBE_NO_RH); ck_pr_fence_load(); g_p = ck_pr_load_uint(generation); } while (g != g_p); return CK_CC_DECONST_PTR(object); } void * ck_rhs_remove(struct ck_rhs *hs, unsigned long h, const void *key) { long slot, first; const void *object; struct ck_rhs_map *map = hs->map; unsigned long n_probes; slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, ck_rhs_map_bound_get(map, h), CK_RHS_PROBE_NO_RH); if (object == NULL) return NULL; map->n_entries--; ck_rhs_do_backward_shift_delete(hs, slot); return CK_CC_DECONST_PTR(object); } bool ck_rhs_move(struct ck_rhs *hs, struct ck_rhs *source, ck_rhs_hash_cb_t *hf, ck_rhs_compare_cb_t *compare, struct ck_malloc *m) { if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL) return false; hs->mode = source->mode; hs->seed = source->seed; hs->map = source->map; hs->load_factor = source->load_factor; hs->m = m; hs->hf = hf; hs->compare = compare; return true; } bool ck_rhs_init(struct ck_rhs *hs, unsigned int mode, ck_rhs_hash_cb_t *hf, ck_rhs_compare_cb_t *compare, struct ck_malloc *m, unsigned long n_entries, unsigned long seed) { if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL) return false; hs->m = m; hs->mode = mode; hs->seed = seed; hs->hf = hf; hs->compare = compare; hs->load_factor = CK_RHS_DEFAULT_LOAD_FACTOR; hs->map = ck_rhs_map_create(hs, n_entries); return hs->map != NULL; } Index: head/sys/contrib/ck =================================================================== --- head/sys/contrib/ck (revision 331897) +++ head/sys/contrib/ck (revision 331898) Property changes on: head/sys/contrib/ck ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /vendor-sys/ck/dist:r314435-331896
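Note on the bit-scan change above: ck_rhs_map_create() now derives map->step with ck_cc_ffsl(n_entries) in place of the removed ck_internal_bsf(). Under ffs semantics (the 1-indexed position of the least significant set bit, with 0 returned for a zero argument) a power-of-two capacity 2^k yields a step of k + 1. A minimal sketch of that behaviour, assuming <ck_cc.h> resolves on the include path (in-tree it is sys/contrib/ck/include/ck_cc.h):

#include <ck_cc.h>

#include <assert.h>

int
main(void)
{

	/* ffs semantics: 1-indexed least significant set bit; 0 maps to 0. */
	assert(ck_cc_ffsl(0UL) == 0);
	assert(ck_cc_ffsl(1UL) == 1);
	assert(ck_cc_ffsl(8UL) == 4);	/* capacity 2^3 -> step 4 */
	assert(ck_cc_ffsl(12UL) == 3);	/* only the lowest set bit counts */
	return 0;
}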
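Usage sketch for the ck_rhs interface exercised by this file (not part of the diff). The allocator shim, hash, comparator, and apply-callback names below (hs_malloc, hs_free, hash_string, compare_string, delete_on_apply) are illustrative inventions; only the entry points visible above (ck_rhs_init, ck_rhs_set, ck_rhs_get, ck_rhs_apply, ck_rhs_count, ck_rhs_destroy), the CK_RHS_MODE_OBJECT flag, and the <ck_rhs.h> header are assumed from the vendor sources. The hash is a simple FNV-1a-style function, not the MurmurHash variants shipped in ck_ht_hash.h.

#include <ck_rhs.h>

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative allocator shim matching the struct ck_malloc hooks used above. */
static void *
hs_malloc(size_t r)
{

	return malloc(r);
}

static void
hs_free(void *p, size_t b, bool defer)
{

	(void)b;
	(void)defer;
	free(p);
	return;
}

static struct ck_malloc my_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

/* Illustrative FNV-1a-style hash; any ck_rhs_hash_cb_t-compatible function works. */
static unsigned long
hash_string(const void *object, unsigned long seed)
{
	const unsigned char *c = object;
	unsigned long h = seed ^ 14695981039346656037UL;

	for (; *c != '\0'; c++)
		h = (h ^ *c) * 1099511628211UL;

	return h;
}

static bool
compare_string(const void *a, const void *b)
{

	return strcmp(a, b) == 0;
}

/* Apply callback: returning NULL requests deletion of an existing entry. */
static void *
delete_on_apply(void *existing, void *cl)
{

	(void)existing;
	(void)cl;
	return NULL;
}

int
main(void)
{
	struct ck_rhs hs;
	const unsigned long seed = 0x5eed;
	const char *key = "example";
	void *previous, *found;
	unsigned long h;

	if (ck_rhs_init(&hs, CK_RHS_MODE_OBJECT, hash_string, compare_string,
	    &my_allocator, 64, seed) == false)
		return 1;

	/* The hash passed in must match the hash function and seed given to init. */
	h = hash_string(key, seed);
	if (ck_rhs_set(&hs, h, key, &previous) == false)
		return 1;
	printf("replaced existing entry: %s\n", previous == NULL ? "no" : "yes");

	found = ck_rhs_get(&hs, h, key);
	if (found != NULL)
		printf("lookup: %s, entries: %lu\n", (const char *)found,
		    ck_rhs_count(&hs));

	/* Delete through the apply interface documented above ck_rhs_apply(). */
	ck_rhs_apply(&hs, h, key, delete_on_apply, NULL);

	ck_rhs_destroy(&hs);
	return 0;
}

Per the comment block preceding ck_rhs_apply(), returning NULL from the callback deletes the entry, returning the same pointer leaves the table unchanged, and returning a different pointer replaces the value.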