Index: head/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
===================================================================
--- head/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
+++ head/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
@@ -33,7 +33,8 @@
 #include
 
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+    !defined(HAS_EMULATED_ATOMIC64)
 
 #ifdef _KERNEL
 #include
Index: head/sys/cddl/compat/opensolaris/sys/atomic.h
===================================================================
--- head/sys/cddl/compat/opensolaris/sys/atomic.h
+++ head/sys/cddl/compat/opensolaris/sys/atomic.h
@@ -42,7 +42,8 @@
 #endif
 
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+    !defined(HAS_EMULATED_ATOMIC64)
 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
 extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
@@ -109,7 +110,8 @@
 #endif
 
 #if defined(__LP64__) || defined(__mips_n32) || \
-    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
+    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
+    defined(HAS_EMULATED_ATOMIC64)
 static __inline void
 atomic_dec_64(volatile uint64_t *target)
 {
Index: head/sys/conf/files.mips
===================================================================
--- head/sys/conf/files.mips
+++ head/sys/conf/files.mips
@@ -50,6 +50,7 @@
 # misc opt-in bits
 kern/kern_clocksource.c		standard
 kern/link_elf_obj.c		standard
+kern/subr_atomic64.c		optional mips | mipsel | mipshf | mipselhf
 kern/subr_busdma_bufalloc.c	standard
 kern/subr_dummy_vdso_tc.c	standard
 kern/subr_sfbuf.c		optional mips | mipsel | mipsn32
Index: head/sys/conf/files.powerpc
===================================================================
--- head/sys/conf/files.powerpc
+++ head/sys/conf/files.powerpc
@@ -76,6 +76,7 @@
 dev/usb/controller/ehci_fsl.c	optional ehci mpc85xx
 dev/vt/hw/ofwfb/ofwfb.c		optional vt aim
 kern/kern_clocksource.c		standard
+kern/subr_atomic64.c		optional powerpc | powerpcspe
 kern/subr_dummy_vdso_tc.c	standard
 kern/syscalls.c			optional ktr
 kern/subr_sfbuf.c		standard
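
[Note (illustration, not part of the change): once HAS_EMULATED_ATOMIC64
is defined, the OpenSolaris compat header above compiles its inline
64-bit wrappers instead of declaring the extern fallbacks.  A wrapper
such as atomic_dec_64() can then be a one-liner over the emulated
primitives added below; a sketch, assuming the inline body simply
delegates:

    static __inline void
    atomic_dec_64(volatile uint64_t *target)
    {
            /* Assumed body; the diff context cuts off before it. */
            atomic_subtract_64(target, 1);
    }
]
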
Index: head/sys/kern/subr_atomic64.c
===================================================================
--- head/sys/kern/subr_atomic64.c
+++ head/sys/kern/subr_atomic64.c
@@ -0,0 +1,140 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Justin Hibbits
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/atomic.h>
+#include <machine/param.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+enum {
+	ATOMIC64_ADD,
+	ATOMIC64_CLEAR,
+	ATOMIC64_CMPSET,
+	ATOMIC64_FCMPSET,
+	ATOMIC64_FETCHADD,
+	ATOMIC64_LOAD,
+	ATOMIC64_SET,
+	ATOMIC64_SUBTRACT,
+	ATOMIC64_STORE,
+	ATOMIC64_SWAP
+};
+
+#ifdef _KERNEL
+#define	A64_POOL_SIZE	MAXCPU
+/* Estimated size of a cacheline */
+#define	CACHE_ALIGN	CACHE_LINE_SIZE
+
+#define GET_MUTEX(p) \
+    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])
+
+#define LOCK_A64()			\
+	struct mtx *_amtx = GET_MUTEX(p);	\
+	if (smp_started) mtx_lock(_amtx)
+
+#define UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)
+
+#define ATOMIC64_EMU_UN(op, rt, block, ret)			\
+    rt								\
+    atomic_##op##_64(volatile u_int64_t *p) {			\
+	u_int64_t tmp __unused;					\
+	LOCK_A64();						\
+	block;							\
+	UNLOCK_A64();						\
+	ret; } struct hack
+
+#define ATOMIC64_EMU_BIN(op, rt, block, ret)			\
+    rt								\
+    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) {	\
+	u_int64_t tmp __unused;					\
+	LOCK_A64();						\
+	block;							\
+	UNLOCK_A64();						\
+	ret; } struct hack
+
+static struct mtx a64_mtx_pool[A64_POOL_SIZE];
+
+ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
+ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
+ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
+ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
+ATOMIC64_EMU_BIN(set, void, *p |= v, return);
+ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
+ATOMIC64_EMU_BIN(store, void, *p = v, return);
+ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return (v));
+
+int atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
+{
+	u_int64_t tmp;
+
+	LOCK_A64();
+	tmp = *p;
+	if (tmp == old)
+		*p = new;
+	UNLOCK_A64();
+
+	return (tmp == old);
+}
+
+int atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
+{
+	u_int64_t tmp, tmp_old;
+
+	LOCK_A64();
+	tmp = *p;
+	tmp_old = *old;
+	if (tmp == tmp_old)
+		*p = new;
+	else
+		*old = tmp;
+	UNLOCK_A64();
+
+	return (tmp == tmp_old);
+}
+
+static void
+atomic64_mtxinit(void *x __unused)
+{
+	int i;
+
+	for (i = 0; i < A64_POOL_SIZE; i++)
+		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
+}
+
+SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
+
+#endif /* _KERNEL */
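
[Note (illustration, not part of the change): the ATOMIC64_EMU_* macros
above stamp out one mutex-protected function per operation; the trailing
"struct hack" exists only to swallow the semicolon after each macro use.
Hand-expanding ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return) gives
roughly:

    void
    atomic_add_64(volatile u_int64_t *p, u_int64_t v)
    {
            u_int64_t tmp __unused;
            struct mtx *_amtx = GET_MUTEX(p);

            if (smp_started)
                    mtx_lock(_amtx);
            (*p = *p + v);
            if (smp_started)
                    mtx_unlock(_amtx);
            return;
    }

The smp_started test presumably lets these run during early boot, when
only one CPU is up and the mutex pool is not yet initialized.]
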
Index: head/sys/mips/include/atomic.h
===================================================================
--- head/sys/mips/include/atomic.h
+++ head/sys/mips/include/atomic.h
@@ -38,6 +38,10 @@
 
 #include <sys/atomic_common.h>
 
+#if !defined(__mips_n64) && !defined(__mips_n32)
+#include <sys/_atomic64e.h>
+#endif
+
 /*
  * Note: All the 64-bit atomic operations are only atomic when running
  * in 64-bit mode.  It is assumed that code compiled for n32 and n64
Index: head/sys/powerpc/include/atomic.h
===================================================================
--- head/sys/powerpc/include/atomic.h
+++ head/sys/powerpc/include/atomic.h
@@ -40,6 +40,10 @@
 
 #include <sys/atomic_common.h>
 
+#ifndef __powerpc64__
+#include <sys/_atomic64e.h>
+#endif
+
 /*
  * The __ATOMIC_REL/ACQ() macros provide memory barriers only in conjunction
  * with the atomic lXarx/stXcx. sequences below. They are not exposed outside
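
[Note (illustration, not part of the change): a typical caller-side
retry loop over the emulated atomic_fcmpset_64().  On failure, fcmpset
stores the observed value back through 'old', so the loop needs no
separate reload; counter_add64() is a hypothetical helper:

    static void
    counter_add64(volatile u_int64_t *cnt, u_int64_t inc)
    {
            u_int64_t old;

            old = atomic_load_64(cnt);
            while (!atomic_fcmpset_64(cnt, &old, old + inc))
                    ;	/* 'old' was refreshed with the conflicting value */
    }
]
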
Index: head/sys/sys/_atomic64e.h
===================================================================
--- head/sys/sys/_atomic64e.h
+++ head/sys/sys/_atomic64e.h
@@ -0,0 +1,80 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Justin Hibbits
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+
+#ifndef _SYS_ATOMIC64E_H_
+#define	_SYS_ATOMIC64E_H_
+
+#ifndef _MACHINE_ATOMIC_H_
+#error "This should not be included directly.  Include <machine/atomic.h>"
+#endif
+
+#ifdef _KERNEL
+#define	HAS_EMULATED_ATOMIC64
+
+/* Emulated versions of 64-bit atomic operations. */
+
+void	atomic_add_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_add_acq_64	atomic_add_64
+#define	atomic_add_rel_64	atomic_add_64
+
+int	atomic_cmpset_64(volatile u_int64_t *, u_int64_t, u_int64_t);
+#define	atomic_cmpset_acq_64	atomic_cmpset_64
+#define	atomic_cmpset_rel_64	atomic_cmpset_64
+
+void	atomic_clear_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_clear_acq_64	atomic_clear_64
+#define	atomic_clear_rel_64	atomic_clear_64
+
+int	atomic_fcmpset_64(volatile u_int64_t *, u_int64_t *, u_int64_t);
+#define	atomic_fcmpset_acq_64	atomic_fcmpset_64
+#define	atomic_fcmpset_rel_64	atomic_fcmpset_64
+
+u_int64_t	atomic_fetchadd_64(volatile u_int64_t *, u_int64_t);
+
+u_int64_t	atomic_load_64(volatile u_int64_t *);
+#define	atomic_load_acq_64	atomic_load_64
+
+void	atomic_readandclear_64(volatile u_int64_t *);
+
+void	atomic_set_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_set_acq_64	atomic_set_64
+#define	atomic_set_rel_64	atomic_set_64
+
+void	atomic_subtract_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_subtract_acq_64	atomic_subtract_64
+#define	atomic_subtract_rel_64	atomic_subtract_64
+
+void	atomic_store_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_store_rel_64	atomic_store_64
+
+u_int64_t	atomic_swap_64(volatile u_int64_t *, u_int64_t);
+
+#endif /* _KERNEL */
+#endif /* _SYS_ATOMIC64E_H_ */
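
[Note (illustration, not part of the change): because every emulated
operation on a given address takes the same pool mutex, the acquire and
release variants in _atomic64e.h can simply alias the plain ones; the
mutex lock/unlock already orders the accesses.  A hypothetical consumer,
showing that MI code keeping 64-bit counters now builds unchanged on
32-bit MIPS and PowerPC:

    static volatile u_int64_t stats_bytes;	/* hypothetical counter */

    static void
    rx_account(u_int len)
    {
            /* Atomic even on 32-bit CPUs; serialized by the pool mutex. */
            atomic_add_64(&stats_bytes, (u_int64_t)len);
    }

    static u_int64_t
    rx_total(void)
    {
            /* A plain 64-bit load could tear on 32-bit CPUs; this cannot. */
            return (atomic_load_64(&stats_bytes));
    }
]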