diff --git a/include/os/freebsd/spl/sys/atomic.h b/include/os/freebsd/spl/sys/atomic.h
index e283c6c0e3ff..4227e5f7d3ec 100644
--- a/include/os/freebsd/spl/sys/atomic.h
+++ b/include/os/freebsd/spl/sys/atomic.h
@@ -1,188 +1,182 @@
 /*
  * Copyright (c) 2007 Pawel Jakub Dawidek
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _OPENSOLARIS_SYS_ATOMIC_H_
 #define _OPENSOLARIS_SYS_ATOMIC_H_
 
 #include
 #include
 
-#define casptr(_a, _b, _c) \
-	atomic_cmpset_ptr((volatile uintptr_t *)(_a), \
-	    (uintptr_t)(_b), \
-	    (uintptr_t)(_c))
-#define cas32 atomic_cmpset_32
-#define atomic_sub_64 atomic_subtract_64
-
-#if defined(__i386__) || defined(KLD_MODULE)
+#define atomic_sub_64 atomic_subtract_64
+
+#if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
 #define I386_HAVE_ATOMIC64
 #endif
 
+#if defined(__i386__) || defined(__amd64__) || defined(__arm__)
+/* No spurious failures from fcmpset. */
+#define STRONG_FCMPSET
+#endif
+
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+	!defined(HAS_EMULATED_ATOMIC64)
 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
+extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
+extern uint64_t atomic_load_64(volatile uint64_t *a);
+extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
+extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
+    uint64_t newval);
 #endif
 
-#ifndef __sparc64__
-#if defined(__LP64__) || defined(__mips_n32) || \
-    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
-
-#define membar_producer() wmb()
-
-static __inline uint64_t
-atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
-{
-#ifdef __i386__
-	atomic_fcmpset_64(target, &cmp, newval);
-#else
-	atomic_fcmpset_long(target, &cmp, newval);
-#endif
-	return (cmp);
-}
+#define membar_producer atomic_thread_fence_rel
 
 static __inline uint32_t
-atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
+atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
 {
-
-	atomic_fcmpset_int(target, &cmp, newval);
-	return (cmp);
+	return (atomic_fetchadd_32(target, delta) + delta);
 }
 
-static __inline uint64_t
-atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
+static __inline uint_t
+atomic_add_int_nv(volatile uint_t *target, int delta)
 {
-	uint64_t prev;
-
-	prev = atomic_fetchadd_long(target, delta);
-
-	return (prev + delta);
+	return (atomic_add_32_nv(target, delta));
 }
 
-#else
-extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
-    uint32_t newval);
-extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
-    uint64_t newval);
-extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
-extern void membar_producer(void);
-#endif
-#endif
-extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
-
-#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
-    defined(__mips__) || defined(__aarch64__) || defined(__riscv)
-extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
-#else
 static __inline void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
-{
-	atomic_set_8(target, value);
-}
-#endif
-
-static __inline uint32_t
-atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
+atomic_inc_32(volatile uint32_t *target)
 {
-	return (atomic_fetchadd_32(target, delta) + delta);
+	atomic_add_32(target, 1);
 }
 
 static __inline uint32_t
-atomic_add_int_nv(volatile uint32_t *target, int delta)
+atomic_inc_32_nv(volatile uint32_t *target)
 {
-	return (atomic_add_32_nv(target, delta));
+	return (atomic_add_32_nv(target, 1));
 }
 
 static __inline void
 atomic_dec_32(volatile uint32_t *target)
 {
 	atomic_subtract_32(target, 1);
 }
 
 static __inline uint32_t
 atomic_dec_32_nv(volatile uint32_t *target)
 {
-	return (atomic_fetchadd_32(target, -1) - 1);
+	return (atomic_add_32_nv(target, -1));
+}
+
+#ifndef __sparc64__
+static inline uint32_t
+atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
+{
+#ifdef STRONG_FCMPSET
+	(void) atomic_fcmpset_32(target, &cmp, newval);
+#else
+	uint32_t expected = cmp;
+
+	do {
+		if (atomic_fcmpset_32(target, &cmp, newval))
+			break;
+	} while (cmp == expected);
+#endif
+	return (cmp);
 }
+#endif
 
 #if defined(__LP64__) || defined(__mips_n32) || \
-    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
+    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
+    defined(HAS_EMULATED_ATOMIC64)
 static __inline void
 atomic_dec_64(volatile uint64_t *target)
 {
 	atomic_subtract_64(target, 1);
 }
-#endif
 
-static __inline void
-atomic_inc_32(volatile uint32_t *target)
+static inline uint64_t
+atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 {
-	atomic_add_32(target, 1);
+	return (atomic_fetchadd_64(target, delta) + delta);
 }
 
-static __inline uint32_t
-atomic_inc_32_nv(volatile uint32_t *target)
+#ifndef __sparc64__
+static inline uint64_t
+atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
-	return (atomic_add_32_nv(target, 1));
+#ifdef STRONG_FCMPSET
+	(void) atomic_fcmpset_64(target, &cmp, newval);
+#else
+	uint64_t expected = cmp;
+
+	do {
+		if (atomic_fcmpset_64(target, &cmp, newval))
+			break;
+	} while (cmp == expected);
+#endif
+	return (cmp);
 }
+#endif
+#endif
 
 static __inline void
 atomic_inc_64(volatile uint64_t *target)
 {
 	atomic_add_64(target, 1);
 }
 
 static __inline uint64_t
 atomic_inc_64_nv(volatile uint64_t *target)
 {
 	return (atomic_add_64_nv(target, 1));
 }
 
 static __inline uint64_t
 atomic_dec_64_nv(volatile uint64_t *target)
 {
 	return (atomic_add_64_nv(target, -1));
 }
 
 #if !defined(COMPAT_32BIT) && defined(__LP64__)
 static __inline void *
 atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
 {
 	return ((void *)atomic_cas_64((volatile uint64_t *)target,
 	    (uint64_t)cmp, (uint64_t)newval));
 }
 #else
 static __inline void *
 atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
 {
 	return ((void *)atomic_cas_32((volatile uint32_t *)target,
 	    (uint32_t)cmp, (uint32_t)newval));
 }
 #endif /* !defined(COMPAT_32BIT) && defined(__LP64__) */
 
 #endif /* !_OPENSOLARIS_SYS_ATOMIC_H_ */
diff --git a/module/os/freebsd/spl/spl_atomic.c b/module/os/freebsd/spl/spl_atomic.c
index e82fed847409..80040fc6a3e3 100644
--- a/module/os/freebsd/spl/spl_atomic.c
+++ b/module/os/freebsd/spl/spl_atomic.c
@@ -1,138 +1,123 @@
 /*
  * Copyright (c) 2007 Pawel Jakub Dawidek
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 
+#if !defined(__LP64__) && !defined(__mips_n32) && \
+	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+	!defined(HAS_EMULATED_ATOMIC64)
+
 #ifdef _KERNEL
 #include
 
 struct mtx atomic_mtx;
 MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
 #else
 #include
 
 #define mtx_lock(lock) pthread_mutex_lock(lock)
 #define mtx_unlock(lock) pthread_mutex_unlock(lock)
 
 static pthread_mutex_t atomic_mtx;
 
 static __attribute__((constructor)) void
 atomic_init(void)
 {
 	pthread_mutex_init(&atomic_mtx, NULL);
 }
 #endif
 
-#if !defined(__LP64__) && !defined(__mips_n32) && \
-	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
 void
 atomic_add_64(volatile uint64_t *target, int64_t delta)
 {
 	mtx_lock(&atomic_mtx);
 	*target += delta;
 	mtx_unlock(&atomic_mtx);
 }
 
 void
 atomic_dec_64(volatile uint64_t *target)
 {
 	mtx_lock(&atomic_mtx);
 	*target -= 1;
 	mtx_unlock(&atomic_mtx);
 }
-#endif
 
 uint64_t
-atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
+atomic_swap_64(volatile uint64_t *a, uint64_t value)
 {
-	uint64_t newval;
+	uint64_t ret;
 
 	mtx_lock(&atomic_mtx);
-	newval = (*target += delta);
+	ret = *a;
+	*a = value;
 	mtx_unlock(&atomic_mtx);
-	return (newval);
+	return (ret);
 }
 
-#if defined(__powerpc__) || defined(__arm__) || defined(__mips__)
-void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
+uint64_t
+atomic_load_64(volatile uint64_t *a)
 {
+	uint64_t ret;
+
 	mtx_lock(&atomic_mtx);
-	*target |= value;
+	ret = *a;
 	mtx_unlock(&atomic_mtx);
+	return (ret);
 }
-#endif
 
-uint8_t
-atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
+uint64_t
+atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 {
-	uint8_t newval;
+	uint64_t newval;
 
 	mtx_lock(&atomic_mtx);
-	newval = (*target |= value);
+	newval = (*target += delta);
 	mtx_unlock(&atomic_mtx);
 	return (newval);
 }
 
 uint64_t
 atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
 	uint64_t oldval;
 
 	mtx_lock(&atomic_mtx);
 	oldval = *target;
 	if (oldval == cmp)
 		*target = newval;
 	mtx_unlock(&atomic_mtx);
 	return (oldval);
 }
-
-uint32_t
-atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
-{
-	uint32_t oldval;
-
-	mtx_lock(&atomic_mtx);
-	oldval = *target;
-	if (oldval == cmp)
-		*target = newval;
-	mtx_unlock(&atomic_mtx);
-	return (oldval);
-}
-
-void
-membar_producer(void)
-{
-	/* nothing */
-}
+#endif
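Editor's note, not part of the patch: the new atomic_cas_32()/atomic_cas_64() fallback exists because atomic_fcmpset_*() is allowed to fail spuriously on LL/SC architectures, while Solaris-style atomic_cas_*() must return the value it observed; the caller infers success when that value equals the cmp it passed in. A minimal userland sketch of the same retry pattern, using the standard GCC/Clang __atomic builtins and a hypothetical cas_32_emul() name (nothing here is taken from the diff above):

#include <stdint.h>

/*
 * Illustration only; assumes GCC/Clang __atomic builtins.
 * A weak compare-exchange (like fcmpset on LL/SC CPUs) may fail even
 * when *target == cmp, so retry while the observed value still matches
 * the caller's expectation, then return whatever value was observed.
 */
static uint32_t
cas_32_emul(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
	uint32_t expected = cmp;

	do {
		if (__atomic_compare_exchange_n(target, &cmp, newval,
		    1 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;			/* swapped; cmp unchanged */
	} while (cmp == expected);		/* spurious failure: retry */
	return (cmp);				/* value seen by the CAS */
}

A caller treats the result exactly as with atomic_cas_*() in the patch: the swap took place if and only if the returned value equals the cmp argument.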