diff --git a/include/os/freebsd/spl/sys/atomic.h b/include/os/freebsd/spl/sys/atomic.h
index 01b13fc9afd7..8b9cec15c5e1 100644
--- a/include/os/freebsd/spl/sys/atomic.h
+++ b/include/os/freebsd/spl/sys/atomic.h
@@ -1,194 +1,195 @@
 /*
  * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _OPENSOLARIS_SYS_ATOMIC_H_
 #define	_OPENSOLARIS_SYS_ATOMIC_H_
 
 #ifndef _STANDALONE
 
 #include <sys/cdefs.h>
 #include <machine/atomic.h>
 
 #define	atomic_sub_64	atomic_subtract_64
 
 #if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
 #define	I386_HAVE_ATOMIC64
 #endif
 
 #if defined(__i386__) || defined(__amd64__) || defined(__arm__)
 /* No spurious failures from fcmpset. */
 #define	STRONG_FCMPSET
 #endif
 
 #if !defined(__LP64__) && !defined(__mips_n32) && \
 	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
 	!defined(HAS_EMULATED_ATOMIC64)
 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
 extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
 extern uint64_t atomic_load_64(volatile uint64_t *a);
 extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
 extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
     uint64_t newval);
 #endif
 
 #define	membar_consumer()	atomic_thread_fence_acq()
 #define	membar_producer()	atomic_thread_fence_rel()
+#define	membar_sync()		atomic_thread_fence_seq_cst()
 
 static __inline uint32_t
 atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
 {
 	return (atomic_fetchadd_32(target, delta) + delta);
 }
 
 static __inline uint_t
 atomic_add_int_nv(volatile uint_t *target, int delta)
 {
 	return (atomic_add_32_nv(target, delta));
 }
 
 static __inline void
 atomic_inc_32(volatile uint32_t *target)
 {
 	atomic_add_32(target, 1);
 }
 
 static __inline uint32_t
 atomic_inc_32_nv(volatile uint32_t *target)
 {
 	return (atomic_add_32_nv(target, 1));
 }
 
 static __inline void
 atomic_dec_32(volatile uint32_t *target)
 {
 	atomic_subtract_32(target, 1);
 }
 
 static __inline uint32_t
 atomic_dec_32_nv(volatile uint32_t *target)
 {
 	return (atomic_add_32_nv(target, -1));
 }
 
 #ifndef __sparc64__
 static inline uint32_t
 atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
 {
 #ifdef STRONG_FCMPSET
 	(void) atomic_fcmpset_32(target, &cmp, newval);
 #else
 	uint32_t expected = cmp;
 
 	do {
 		if (atomic_fcmpset_32(target, &cmp, newval))
 			break;
 	} while (cmp == expected);
 #endif
 	return (cmp);
 }
 #endif
 
 #if defined(__LP64__) || defined(__mips_n32) || \
 	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
 	defined(HAS_EMULATED_ATOMIC64)
 static __inline void
 atomic_dec_64(volatile uint64_t *target)
 {
 	atomic_subtract_64(target, 1);
 }
 
 static inline uint64_t
 atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 {
 	return (atomic_fetchadd_64(target, delta) + delta);
 }
 
 #ifndef __sparc64__
 static inline uint64_t
 atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
 #ifdef STRONG_FCMPSET
 	(void) atomic_fcmpset_64(target, &cmp, newval);
 #else
 	uint64_t expected = cmp;
 
 	do {
 		if (atomic_fcmpset_64(target, &cmp, newval))
 			break;
 	} while (cmp == expected);
 #endif
 	return (cmp);
 }
 #endif
 #endif
 
 static __inline void
 atomic_inc_64(volatile uint64_t *target)
 {
 	atomic_add_64(target, 1);
 }
 
 static __inline uint64_t
 atomic_inc_64_nv(volatile uint64_t *target)
 {
 	return (atomic_add_64_nv(target, 1));
 }
 
 static __inline uint64_t
 atomic_dec_64_nv(volatile uint64_t *target)
 {
 	return (atomic_add_64_nv(target, -1));
 }
 
 #if !defined(COMPAT_32BIT) && defined(__LP64__)
 static __inline void *
 atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
 {
 	return ((void *)atomic_cas_64((volatile uint64_t *)target,
 	    (uint64_t)cmp, (uint64_t)newval));
 }
 #else
 static __inline void *
 atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
 {
 	return ((void *)atomic_cas_32((volatile uint32_t *)target,
 	    (uint32_t)cmp, (uint32_t)newval));
 }
 #endif	/* !defined(COMPAT_32BIT) && defined(__LP64__) */
 
 #else /* _STANDALONE */
 /*
  * sometimes atomic_add_64 is defined, sometimes not, but the
  * following is always right for the boot loader.
  */
 #undef atomic_add_64
 #define	atomic_add_64(ptr, val)	*(ptr) += val
 #endif /* !_STANDALONE */
 
 #endif	/* !_OPENSOLARIS_SYS_ATOMIC_H_ */
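The new membar_sync() maps to atomic_thread_fence_seq_cst(), the only fence above
that orders an earlier store against a later load; the acquire/release fences
behind membar_consumer()/membar_producer() do not provide store-load ordering.
A minimal sketch of the kind of code that needs the full barrier, using invented
names (not part of the patch):

	volatile uint32_t want_a, want_b;

	static int
	a_may_enter(void)
	{
		want_a = 1;		/* announce intent to enter */
		membar_sync();		/* full fence: store above ordered
					   before the load below */
		return (want_b == 0);	/* Dekker-style check; correct only
					   with store-load ordering */
	}
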
diff --git a/include/os/linux/spl/sys/vmsystm.h b/include/os/linux/spl/sys/vmsystm.h
index fcd61e818fa3..c6d99fb3183d 100644
--- a/include/os/linux/spl/sys/vmsystm.h
+++ b/include/os/linux/spl/sys/vmsystm.h
@@ -1,93 +1,94 @@
 /*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
  * UCRL-CODE-235197
  *
  * This file is part of the SPL, Solaris Porting Layer.
  *
  * The SPL is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
  * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
  *
  * The SPL is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  * for more details.
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _SPL_VMSYSTM_H
 #define _SPL_VMSYSTM_H
 
 #include <linux/mmzone.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 #include <sys/types.h>
 
 #ifdef HAVE_TOTALRAM_PAGES_FUNC
 #define	zfs_totalram_pages	totalram_pages()
 #else
 #define	zfs_totalram_pages	totalram_pages
 #endif
 
 #ifdef HAVE_TOTALHIGH_PAGES
 #define	zfs_totalhigh_pages	totalhigh_pages()
 #else
 #define	zfs_totalhigh_pages	totalhigh_pages
 #endif
 
 #define	membar_consumer()		smp_rmb()
 #define	membar_producer()		smp_wmb()
+#define	membar_sync()			smp_mb()
 
 #define	physmem				zfs_totalram_pages
 
 #define	xcopyin(from, to, size)		copy_from_user(to, from, size)
 #define	xcopyout(from, to, size)	copy_to_user(to, from, size)
 
 static __inline__ int
 copyin(const void *from, void *to, size_t len)
 {
 	/* On error copyin routine returns -1 */
 	if (xcopyin(from, to, len))
 		return (-1);
 
 	return (0);
 }
 
 static __inline__ int
 copyout(const void *from, void *to, size_t len)
 {
 	/* On error copyout routine returns -1 */
 	if (xcopyout(from, to, len))
 		return (-1);
 
 	return (0);
 }
 
 static __inline__ int
 copyinstr(const void *from, void *to, size_t len, size_t *done)
 {
 	size_t rc;
 
 	if (len == 0)
 		return (-ENAMETOOLONG);
 
 	/* XXX: Should return ENAMETOOLONG if 'strlen(from) > len' */
 
 	memset(to, 0, len);
 	rc = copyin(from, to, len - 1);
 	if (done != NULL)
 		*done = rc;
 
 	return (0);
 }
 
 #endif /* SPL_VMSYSTM_H */
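On Linux the three barriers map directly onto the kernel primitives: smp_wmb()
and smp_rmb() for the existing producer/consumer pair, and smp_mb() for the new
full barrier. The weaker pair is intended for the classic publication pattern;
a hedged sketch with invented names (not part of the patch):

	static uint32_t data;
	static volatile uint32_t ready;

	static void
	publish(void)
	{
		data = 42;
		membar_producer();	/* store-store: data reaches memory
					   before ready */
		ready = 1;
	}

	static int
	consume(void)
	{
		if (ready == 0)
			return (-1);
		membar_consumer();	/* load-load: read data only after
					   seeing ready */
		return ((int)data);
	}
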
diff --git a/lib/libspl/atomic.c b/lib/libspl/atomic.c
index ba14b113f585..8cc350710ba0 100644
--- a/lib/libspl/atomic.c
+++ b/lib/libspl/atomic.c
@@ -1,394 +1,400 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License, Version 1.0 only
  * (the "License"). You may not use this file except in compliance
  * with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or https://opensource.org/licenses/CDDL-1.0.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 
 /*
  * Copyright (c) 2009 by Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
 #include <atomic.h>
 
 /*
  * These are the void returning variants
  */
 #define	ATOMIC_INC(name, type) \
 	void atomic_inc_##name(volatile type *target) \
 	{ \
 		(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_INC(8, uint8_t)
 ATOMIC_INC(16, uint16_t)
 ATOMIC_INC(32, uint32_t)
 ATOMIC_INC(64, uint64_t)
 ATOMIC_INC(uchar, uchar_t)
 ATOMIC_INC(ushort, ushort_t)
 ATOMIC_INC(uint, uint_t)
 ATOMIC_INC(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_DEC(name, type) \
 	void atomic_dec_##name(volatile type *target) \
 	{ \
 		(void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_DEC(8, uint8_t)
 ATOMIC_DEC(16, uint16_t)
 ATOMIC_DEC(32, uint32_t)
 ATOMIC_DEC(64, uint64_t)
 ATOMIC_DEC(uchar, uchar_t)
 ATOMIC_DEC(ushort, ushort_t)
 ATOMIC_DEC(uint, uint_t)
 ATOMIC_DEC(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_ADD(name, type1, type2) \
 	void atomic_add_##name(volatile type1 *target, type2 bits) \
 	{ \
 		(void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
 	}
 
 void
 atomic_add_ptr(volatile void *target, ssize_t bits)
 {
 	(void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }
 
 /* BEGIN CSTYLED */
 ATOMIC_ADD(8, uint8_t, int8_t)
 ATOMIC_ADD(16, uint16_t, int16_t)
 ATOMIC_ADD(32, uint32_t, int32_t)
 ATOMIC_ADD(64, uint64_t, int64_t)
 ATOMIC_ADD(char, uchar_t, signed char)
 ATOMIC_ADD(short, ushort_t, short)
 ATOMIC_ADD(int, uint_t, int)
 ATOMIC_ADD(long, ulong_t, long)
 /* END CSTYLED */
 
 #define	ATOMIC_SUB(name, type1, type2) \
 	void atomic_sub_##name(volatile type1 *target, type2 bits) \
 	{ \
 		(void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
 	}
 
 void
 atomic_sub_ptr(volatile void *target, ssize_t bits)
 {
 	(void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }
 
 /* BEGIN CSTYLED */
 ATOMIC_SUB(8, uint8_t, int8_t)
 ATOMIC_SUB(16, uint16_t, int16_t)
 ATOMIC_SUB(32, uint32_t, int32_t)
 ATOMIC_SUB(64, uint64_t, int64_t)
 ATOMIC_SUB(char, uchar_t, signed char)
 ATOMIC_SUB(short, ushort_t, short)
 ATOMIC_SUB(int, uint_t, int)
 ATOMIC_SUB(long, ulong_t, long)
 /* END CSTYLED */
 
 #define	ATOMIC_OR(name, type) \
 	void atomic_or_##name(volatile type *target, type bits) \
 	{ \
 		(void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_OR(8, uint8_t)
 ATOMIC_OR(16, uint16_t)
 ATOMIC_OR(32, uint32_t)
 ATOMIC_OR(64, uint64_t)
 ATOMIC_OR(uchar, uchar_t)
 ATOMIC_OR(ushort, ushort_t)
 ATOMIC_OR(uint, uint_t)
 ATOMIC_OR(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_AND(name, type) \
 	void atomic_and_##name(volatile type *target, type bits) \
 	{ \
 		(void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_AND(8, uint8_t)
 ATOMIC_AND(16, uint16_t)
 ATOMIC_AND(32, uint32_t)
 ATOMIC_AND(64, uint64_t)
 ATOMIC_AND(uchar, uchar_t)
 ATOMIC_AND(ushort, ushort_t)
 ATOMIC_AND(uint, uint_t)
 ATOMIC_AND(ulong, ulong_t)
 /* END CSTYLED */
 
 /*
  * New value returning variants
  */
 #define	ATOMIC_INC_NV(name, type) \
 	type atomic_inc_##name##_nv(volatile type *target) \
 	{ \
 		return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_INC_NV(8, uint8_t)
 ATOMIC_INC_NV(16, uint16_t)
 ATOMIC_INC_NV(32, uint32_t)
 ATOMIC_INC_NV(64, uint64_t)
 ATOMIC_INC_NV(uchar, uchar_t)
 ATOMIC_INC_NV(ushort, ushort_t)
 ATOMIC_INC_NV(uint, uint_t)
 ATOMIC_INC_NV(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_DEC_NV(name, type) \
 	type atomic_dec_##name##_nv(volatile type *target) \
 	{ \
 		return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_DEC_NV(8, uint8_t)
 ATOMIC_DEC_NV(16, uint16_t)
 ATOMIC_DEC_NV(32, uint32_t)
 ATOMIC_DEC_NV(64, uint64_t)
 ATOMIC_DEC_NV(uchar, uchar_t)
 ATOMIC_DEC_NV(ushort, ushort_t)
 ATOMIC_DEC_NV(uint, uint_t)
 ATOMIC_DEC_NV(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_ADD_NV(name, type1, type2) \
 	type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
 	{ \
 		return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 	}
 
 void *
 atomic_add_ptr_nv(volatile void *target, ssize_t bits)
 {
 	return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
 }
 
 /* BEGIN CSTYLED */
 ATOMIC_ADD_NV(8, uint8_t, int8_t)
 ATOMIC_ADD_NV(16, uint16_t, int16_t)
 ATOMIC_ADD_NV(32, uint32_t, int32_t)
 ATOMIC_ADD_NV(64, uint64_t, int64_t)
 ATOMIC_ADD_NV(char, uchar_t, signed char)
 ATOMIC_ADD_NV(short, ushort_t, short)
 ATOMIC_ADD_NV(int, uint_t, int)
 ATOMIC_ADD_NV(long, ulong_t, long)
 /* END CSTYLED */
 
 #define	ATOMIC_SUB_NV(name, type1, type2) \
 	type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
 	{ \
 		return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 	}
 
 void *
 atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
 {
 	return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
 }
 
 /* BEGIN CSTYLED */
 ATOMIC_SUB_NV(8, uint8_t, int8_t)
 ATOMIC_SUB_NV(char, uchar_t, signed char)
 ATOMIC_SUB_NV(16, uint16_t, int16_t)
 ATOMIC_SUB_NV(short, ushort_t, short)
 ATOMIC_SUB_NV(32, uint32_t, int32_t)
 ATOMIC_SUB_NV(int, uint_t, int)
 ATOMIC_SUB_NV(long, ulong_t, long)
 ATOMIC_SUB_NV(64, uint64_t, int64_t)
 /* END CSTYLED */
 
 #define	ATOMIC_OR_NV(name, type) \
 	type atomic_or_##name##_nv(volatile type *target, type bits) \
 	{ \
 		return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_OR_NV(8, uint8_t)
 ATOMIC_OR_NV(16, uint16_t)
 ATOMIC_OR_NV(32, uint32_t)
 ATOMIC_OR_NV(64, uint64_t)
 ATOMIC_OR_NV(uchar, uchar_t)
 ATOMIC_OR_NV(ushort, ushort_t)
 ATOMIC_OR_NV(uint, uint_t)
 ATOMIC_OR_NV(ulong, ulong_t)
 /* END CSTYLED */
 
 #define	ATOMIC_AND_NV(name, type) \
 	type atomic_and_##name##_nv(volatile type *target, type bits) \
 	{ \
 		return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_AND_NV(8, uint8_t)
 ATOMIC_AND_NV(16, uint16_t)
 ATOMIC_AND_NV(32, uint32_t)
 ATOMIC_AND_NV(64, uint64_t)
 ATOMIC_AND_NV(uchar, uchar_t)
 ATOMIC_AND_NV(ushort, ushort_t)
 ATOMIC_AND_NV(uint, uint_t)
 ATOMIC_AND_NV(ulong, ulong_t)
 /* END CSTYLED */
 
 /*
  * If *tgt == exp, set *tgt = des; return old value
  *
  * This may not look right on the first pass (or the sixteenth), but,
  * from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html:
  * > If they are not equal, the operation is a read
  * > and the current contents of *ptr are written into *expected.
  * And, in the converse case, exp is already *target by definition.
  */
 #define	ATOMIC_CAS(name, type) \
 	type atomic_cas_##name(volatile type *target, type exp, type des) \
 	{ \
 		__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
 		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
 		return (exp); \
 	}
 
 void *
 atomic_cas_ptr(volatile void *target, void *exp, void *des)
 {
 	__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
 	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 	return (exp);
 }
 
 /* BEGIN CSTYLED */
 ATOMIC_CAS(8, uint8_t)
 ATOMIC_CAS(16, uint16_t)
 ATOMIC_CAS(32, uint32_t)
 ATOMIC_CAS(64, uint64_t)
 ATOMIC_CAS(uchar, uchar_t)
 ATOMIC_CAS(ushort, ushort_t)
 ATOMIC_CAS(uint, uint_t)
 ATOMIC_CAS(ulong, ulong_t)
 /* END CSTYLED */
 
 /*
  * Swap target and return old value
  */
 #define	ATOMIC_SWAP(name, type) \
 	type atomic_swap_##name(volatile type *target, type bits) \
 	{ \
 		return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
 	}
 
 /* BEGIN CSTYLED */
 ATOMIC_SWAP(8, uint8_t)
 ATOMIC_SWAP(16, uint16_t)
 ATOMIC_SWAP(32, uint32_t)
 ATOMIC_SWAP(64, uint64_t)
 ATOMIC_SWAP(uchar, uchar_t)
 ATOMIC_SWAP(ushort, ushort_t)
 ATOMIC_SWAP(uint, uint_t)
 ATOMIC_SWAP(ulong, ulong_t)
 /* END CSTYLED */
 
 void *
 atomic_swap_ptr(volatile void *target, void *bits)
 {
 	return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
 }
 
 #ifndef _LP64
 uint64_t
 atomic_load_64(volatile uint64_t *target)
 {
 	return (__atomic_load_n(target, __ATOMIC_RELAXED));
 }
 
 void
 atomic_store_64(volatile uint64_t *target, uint64_t bits)
 {
 	return (__atomic_store_n(target, bits, __ATOMIC_RELAXED));
 }
 #endif
 
 int
 atomic_set_long_excl(volatile ulong_t *target, uint_t value)
 {
 	ulong_t bit = 1UL << value;
 	ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
 	return ((old & bit) ? -1 : 0);
 }
 
 int
 atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
 {
 	ulong_t bit = 1UL << value;
 	ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
 	return ((old & bit) ? 0 : -1);
 }
 
 void
 membar_enter(void)
 {
 	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 }
 
 void
 membar_exit(void)
 {
 	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 }
 
+void
+membar_sync(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
 void
 membar_producer(void)
 {
 	__atomic_thread_fence(__ATOMIC_RELEASE);
 }
 
 void
 membar_consumer(void)
 {
 	__atomic_thread_fence(__ATOMIC_ACQUIRE);
 }
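Because the Solaris-style CAS returns the old value rather than a success flag,
callers typically loop until the value they read comes back unchanged. A sketch
of that idiom (atomic_max_64 is an invented helper, not part of this library):

	static void
	atomic_max_64(volatile uint64_t *target, uint64_t value)
	{
		uint64_t old, new;

		do {
			old = *target;
			new = (value > old) ? value : old;
			if (new == old)
				break;	/* current value is already >= value */
			/* atomic_cas_64() returns the prior contents */
		} while (atomic_cas_64(target, old, new) != old);
	}
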
diff --git a/lib/libspl/include/atomic.h b/lib/libspl/include/atomic.h
index 1249d42b604a..4ebdbbda9864 100644
--- a/lib/libspl/include/atomic.h
+++ b/lib/libspl/include/atomic.h
@@ -1,339 +1,346 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or https://opensource.org/licenses/CDDL-1.0.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 
 /*
  * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef	_SYS_ATOMIC_H
 #define	_SYS_ATOMIC_H
 
 #include <sys/types.h>
 #include <sys/inttypes.h>
 
 #ifdef	__cplusplus
 extern "C" {
 #endif
 
 #if defined(__STDC__)
 /*
  * Increment target.
  */
 extern void atomic_inc_8(volatile uint8_t *);
 extern void atomic_inc_uchar(volatile uchar_t *);
 extern void atomic_inc_16(volatile uint16_t *);
 extern void atomic_inc_ushort(volatile ushort_t *);
 extern void atomic_inc_32(volatile uint32_t *);
 extern void atomic_inc_uint(volatile uint_t *);
 extern void atomic_inc_ulong(volatile ulong_t *);
 #if defined(_INT64_TYPE)
 extern void atomic_inc_64(volatile uint64_t *);
 #endif
 
 /*
  * Decrement target
  */
 extern void atomic_dec_8(volatile uint8_t *);
 extern void atomic_dec_uchar(volatile uchar_t *);
 extern void atomic_dec_16(volatile uint16_t *);
 extern void atomic_dec_ushort(volatile ushort_t *);
 extern void atomic_dec_32(volatile uint32_t *);
 extern void atomic_dec_uint(volatile uint_t *);
 extern void atomic_dec_ulong(volatile ulong_t *);
 #if defined(_INT64_TYPE)
 extern void atomic_dec_64(volatile uint64_t *);
 #endif
 
 /*
  * Add delta to target
  */
 extern void atomic_add_8(volatile uint8_t *, int8_t);
 extern void atomic_add_char(volatile uchar_t *, signed char);
 extern void atomic_add_16(volatile uint16_t *, int16_t);
 extern void atomic_add_short(volatile ushort_t *, short);
 extern void atomic_add_32(volatile uint32_t *, int32_t);
 extern void atomic_add_int(volatile uint_t *, int);
 extern void atomic_add_ptr(volatile void *, ssize_t);
 extern void atomic_add_long(volatile ulong_t *, long);
 #if defined(_INT64_TYPE)
 extern void atomic_add_64(volatile uint64_t *, int64_t);
 #endif
 
 /*
  * Subtract delta from target
  */
 extern void atomic_sub_8(volatile uint8_t *, int8_t);
 extern void atomic_sub_char(volatile uchar_t *, signed char);
 extern void atomic_sub_16(volatile uint16_t *, int16_t);
 extern void atomic_sub_short(volatile ushort_t *, short);
 extern void atomic_sub_32(volatile uint32_t *, int32_t);
 extern void atomic_sub_int(volatile uint_t *, int);
 extern void atomic_sub_ptr(volatile void *, ssize_t);
 extern void atomic_sub_long(volatile ulong_t *, long);
 #if defined(_INT64_TYPE)
 extern void atomic_sub_64(volatile uint64_t *, int64_t);
 #endif
 
 /*
  * logical OR bits with target
  */
 extern void atomic_or_8(volatile uint8_t *, uint8_t);
 extern void atomic_or_uchar(volatile uchar_t *, uchar_t);
 extern void atomic_or_16(volatile uint16_t *, uint16_t);
 extern void atomic_or_ushort(volatile ushort_t *, ushort_t);
 extern void atomic_or_32(volatile uint32_t *, uint32_t);
 extern void atomic_or_uint(volatile uint_t *, uint_t);
 extern void atomic_or_ulong(volatile ulong_t *, ulong_t);
 #if defined(_INT64_TYPE)
 extern void atomic_or_64(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * logical AND bits with target
  */
 extern void atomic_and_8(volatile uint8_t *, uint8_t);
 extern void atomic_and_uchar(volatile uchar_t *, uchar_t);
 extern void atomic_and_16(volatile uint16_t *, uint16_t);
 extern void atomic_and_ushort(volatile ushort_t *, ushort_t);
 extern void atomic_and_32(volatile uint32_t *, uint32_t);
 extern void atomic_and_uint(volatile uint_t *, uint_t);
 extern void atomic_and_ulong(volatile ulong_t *, ulong_t);
 #if defined(_INT64_TYPE)
 extern void atomic_and_64(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * As above, but return the new value. Note that these _nv() variants are
  * substantially more expensive on some platforms than the no-return-value
  * versions above, so don't use them unless you really need to know the
  * new value *atomically* (e.g. when decrementing a reference count and
  * checking whether it went to zero).
  */
 
 /*
  * Increment target and return new value.
  */
 extern uint8_t atomic_inc_8_nv(volatile uint8_t *);
 extern uchar_t atomic_inc_uchar_nv(volatile uchar_t *);
 extern uint16_t atomic_inc_16_nv(volatile uint16_t *);
 extern ushort_t atomic_inc_ushort_nv(volatile ushort_t *);
 extern uint32_t atomic_inc_32_nv(volatile uint32_t *);
 extern uint_t atomic_inc_uint_nv(volatile uint_t *);
 extern ulong_t atomic_inc_ulong_nv(volatile ulong_t *);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_inc_64_nv(volatile uint64_t *);
 #endif
 
 /*
  * Decrement target and return new value.
  */
 extern uint8_t atomic_dec_8_nv(volatile uint8_t *);
 extern uchar_t atomic_dec_uchar_nv(volatile uchar_t *);
 extern uint16_t atomic_dec_16_nv(volatile uint16_t *);
 extern ushort_t atomic_dec_ushort_nv(volatile ushort_t *);
 extern uint32_t atomic_dec_32_nv(volatile uint32_t *);
 extern uint_t atomic_dec_uint_nv(volatile uint_t *);
 extern ulong_t atomic_dec_ulong_nv(volatile ulong_t *);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_dec_64_nv(volatile uint64_t *);
 #endif
 
 /*
  * Add delta to target
  */
 extern uint8_t atomic_add_8_nv(volatile uint8_t *, int8_t);
 extern uchar_t atomic_add_char_nv(volatile uchar_t *, signed char);
 extern uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
 extern ushort_t atomic_add_short_nv(volatile ushort_t *, short);
 extern uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
 extern uint_t atomic_add_int_nv(volatile uint_t *, int);
 extern void *atomic_add_ptr_nv(volatile void *, ssize_t);
 extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
 #endif
 
 /*
  * Subtract delta from target
  */
 extern uint8_t atomic_sub_8_nv(volatile uint8_t *, int8_t);
 extern uchar_t atomic_sub_char_nv(volatile uchar_t *, signed char);
 extern uint16_t atomic_sub_16_nv(volatile uint16_t *, int16_t);
 extern ushort_t atomic_sub_short_nv(volatile ushort_t *, short);
 extern uint32_t atomic_sub_32_nv(volatile uint32_t *, int32_t);
 extern uint_t atomic_sub_int_nv(volatile uint_t *, int);
 extern void *atomic_sub_ptr_nv(volatile void *, ssize_t);
 extern ulong_t atomic_sub_long_nv(volatile ulong_t *, long);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_sub_64_nv(volatile uint64_t *, int64_t);
 #endif
 
 /*
  * logical OR bits with target and return new value.
  */
 extern uint8_t atomic_or_8_nv(volatile uint8_t *, uint8_t);
 extern uchar_t atomic_or_uchar_nv(volatile uchar_t *, uchar_t);
 extern uint16_t atomic_or_16_nv(volatile uint16_t *, uint16_t);
 extern ushort_t atomic_or_ushort_nv(volatile ushort_t *, ushort_t);
 extern uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
 extern uint_t atomic_or_uint_nv(volatile uint_t *, uint_t);
 extern ulong_t atomic_or_ulong_nv(volatile ulong_t *, ulong_t);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * logical AND bits with target and return new value.
  */
 extern uint8_t atomic_and_8_nv(volatile uint8_t *, uint8_t);
 extern uchar_t atomic_and_uchar_nv(volatile uchar_t *, uchar_t);
 extern uint16_t atomic_and_16_nv(volatile uint16_t *, uint16_t);
 extern ushort_t atomic_and_ushort_nv(volatile ushort_t *, ushort_t);
 extern uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
 extern uint_t atomic_and_uint_nv(volatile uint_t *, uint_t);
 extern ulong_t atomic_and_ulong_nv(volatile ulong_t *, ulong_t);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * If *arg1 == arg2, set *arg1 = arg3; return old value
  */
 extern uint8_t atomic_cas_8(volatile uint8_t *, uint8_t, uint8_t);
 extern uchar_t atomic_cas_uchar(volatile uchar_t *, uchar_t, uchar_t);
 extern uint16_t atomic_cas_16(volatile uint16_t *, uint16_t, uint16_t);
 extern ushort_t atomic_cas_ushort(volatile ushort_t *, ushort_t, ushort_t);
 extern uint32_t atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
 extern uint_t atomic_cas_uint(volatile uint_t *, uint_t, uint_t);
 extern void *atomic_cas_ptr(volatile void *, void *, void *);
 extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t, ulong_t);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
 #endif
 
 /*
  * Swap target and return old value
  */
 extern uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);
 extern uchar_t atomic_swap_uchar(volatile uchar_t *, uchar_t);
 extern uint16_t atomic_swap_16(volatile uint16_t *, uint16_t);
 extern ushort_t atomic_swap_ushort(volatile ushort_t *, ushort_t);
 extern uint32_t atomic_swap_32(volatile uint32_t *, uint32_t);
 extern uint_t atomic_swap_uint(volatile uint_t *, uint_t);
 extern void *atomic_swap_ptr(volatile void *, void *);
 extern ulong_t atomic_swap_ulong(volatile ulong_t *, ulong_t);
 #if defined(_INT64_TYPE)
 extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * Atomically read variable.
  */
 #define	atomic_load_char(p)	(*(volatile uchar_t *)(p))
 #define	atomic_load_short(p)	(*(volatile ushort_t *)(p))
 #define	atomic_load_int(p)	(*(volatile uint_t *)(p))
 #define	atomic_load_long(p)	(*(volatile ulong_t *)(p))
 #define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
 #define	atomic_load_8(p)	(*(volatile uint8_t *)(p))
 #define	atomic_load_16(p)	(*(volatile uint16_t *)(p))
 #define	atomic_load_32(p)	(*(volatile uint32_t *)(p))
 #ifdef _LP64
 #define	atomic_load_64(p)	(*(volatile uint64_t *)(p))
 #elif defined(_INT64_TYPE)
 extern uint64_t atomic_load_64(volatile uint64_t *);
 #endif
 
 /*
  * Atomically write variable.
  */
 #define	atomic_store_char(p, v)		\
 	(*(volatile uchar_t *)(p) = (uchar_t)(v))
 #define	atomic_store_short(p, v)	\
 	(*(volatile ushort_t *)(p) = (ushort_t)(v))
 #define	atomic_store_int(p, v)		\
 	(*(volatile uint_t *)(p) = (uint_t)(v))
 #define	atomic_store_long(p, v)		\
 	(*(volatile ulong_t *)(p) = (ulong_t)(v))
 #define	atomic_store_ptr(p, v)		\
 	(*(volatile __typeof(*p) *)(p) = (v))
 #define	atomic_store_8(p, v)		\
 	(*(volatile uint8_t *)(p) = (uint8_t)(v))
 #define	atomic_store_16(p, v)		\
 	(*(volatile uint16_t *)(p) = (uint16_t)(v))
 #define	atomic_store_32(p, v)		\
 	(*(volatile uint32_t *)(p) = (uint32_t)(v))
 #ifdef _LP64
 #define	atomic_store_64(p, v)		\
 	(*(volatile uint64_t *)(p) = (uint64_t)(v))
 #elif defined(_INT64_TYPE)
 extern void atomic_store_64(volatile uint64_t *, uint64_t);
 #endif
 
 /*
  * Perform an exclusive atomic bit set/clear on a target.
  * Returns 0 if bit was successfully set/cleared, or -1
  * if the bit was already set/cleared.
  */
 extern int atomic_set_long_excl(volatile ulong_t *, uint_t);
 extern int atomic_clear_long_excl(volatile ulong_t *, uint_t);
 
 /*
  * Generic memory barrier used during lock entry, placed after the
  * memory operation that acquires the lock to guarantee that the lock
  * protects its data. No stores from after the memory barrier will
  * reach visibility, and no loads from after the barrier will be
  * resolved, before the lock acquisition reaches global visibility.
  */
 extern void membar_enter(void);
 
 /*
  * Generic memory barrier used during lock exit, placed before the
  * memory operation that releases the lock to guarantee that the lock
  * protects its data. All loads and stores issued before the barrier
  * will be resolved before the subsequent lock update reaches visibility.
  */
 extern void membar_exit(void);
 
+/*
+ * Make all stores and loads emitted prior to the barrier complete before
+ * crossing it, while also making sure stores and loads emitted after the
+ * barrier only start being executed after crossing it.
+ */
+extern void membar_sync(void);
+
 /*
  * Arrange that all stores issued before this point in the code reach
  * global visibility before any stores that follow; useful in producer
  * modules that update a data item, then set a flag that it is available.
  * The memory barrier guarantees that the available flag is not visible
  * earlier than the updated data, i.e. it imposes store ordering.
  */
 extern void membar_producer(void);
 
 /*
  * Arrange that all loads issued before this point in the code are
  * completed before any subsequent loads; useful in consumer modules
  * that check to see if data is available and read the data.
  * The memory barrier guarantees that the data is not sampled until
  * after the available flag has been seen, i.e. it imposes load ordering.
  */
 extern void membar_consumer(void);
 #endif	/* __STDC__ */
 
 #ifdef	__cplusplus
 }
 #endif
 
 #endif	/* _SYS_ATOMIC_H */
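The exclusive bit operations pair naturally with membar_enter()/membar_exit()
to build a tiny bit-lock, matching the lock-entry/lock-exit semantics documented
above. A hedged sketch with invented names (not part of this header):

	static volatile ulong_t examp_flags;
	#define	EXAMP_LOCK_BIT	0

	static void
	examp_lock(void)
	{
		/* atomic_set_long_excl() returns -1 while the bit is set */
		while (atomic_set_long_excl(&examp_flags, EXAMP_LOCK_BIT) == -1)
			continue;
		membar_enter();	/* lock entry: barrier after the acquiring op */
	}

	static void
	examp_unlock(void)
	{
		membar_exit();	/* lock exit: barrier before the releasing op */
		(void) atomic_clear_long_excl(&examp_flags, EXAMP_LOCK_BIT);
	}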