Index: stable/10/sys/arm/arm/cpufunc_asm_armv7.S
===================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_armv7.S	(revision 283316)
+++ stable/10/sys/arm/arm/cpufunc_asm_armv7.S	(revision 283317)
@@ -1,370 +1,366 @@
/*-
 * Copyright (c) 2010 Per Odlund
 * Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include <machine/sysreg.h>

    .cpu cortex-a8

.Lcoherency_level:
    .word   _C_LABEL(arm_cache_loc)
.Lcache_type:
    .word   _C_LABEL(arm_cache_type)
.Lway_mask:
    .word   0x3ff
.Lmax_index:
    .word   0x7fff
.Lpage_mask:
    .word   0xfff

#define PT_NOS          (1 << 5)
#define PT_S            (1 << 1)
#define PT_INNER_NC     0
#define PT_INNER_WT     (1 << 0)
#define PT_INNER_WB     ((1 << 0) | (1 << 6))
#define PT_INNER_WBWA   (1 << 6)
#define PT_OUTER_NC     0
#define PT_OUTER_WT     (2 << 3)
#define PT_OUTER_WB     (3 << 3)
#define PT_OUTER_WBWA   (1 << 3)

#ifdef SMP
#define PT_ATTR (PT_S|PT_INNER_WBWA|PT_OUTER_WBWA|PT_NOS)
#else
#define PT_ATTR (PT_INNER_WBWA|PT_OUTER_WBWA)
#endif

ENTRY(armv7_setttb)
-   stmdb   sp!, {r0, lr}
-   bl      _C_LABEL(armv7_idcache_wbinv_all) /* clean the D cache */
-   ldmia   sp!, {r0, lr}
    dsb
-
    orr     r0, r0, #PT_ATTR
    mcr     CP15_TTBR0(r0)
    isb
#ifdef SMP
    mcr     CP15_TLBIALLIS
#else
    mcr     CP15_TLBIALL
#endif
    dsb
    isb
    RET
END(armv7_setttb)

ENTRY(armv7_tlb_flushID)
    dsb
#ifdef SMP
    mcr     CP15_TLBIALLIS
    mcr     CP15_BPIALLIS
#else
    mcr     CP15_TLBIALL
    mcr     CP15_BPIALL
#endif
    dsb
    isb
    mov     pc, lr
END(armv7_tlb_flushID)

ENTRY(armv7_tlb_flushID_SE)
    ldr     r1, .Lpage_mask
    bic     r0, r0, r1
#ifdef SMP
    mcr     CP15_TLBIMVAAIS(r0)
    mcr     CP15_BPIALLIS
#else
    mcr     CP15_TLBIMVA(r0)
    mcr     CP15_BPIALL
#endif
    dsb
    isb
    mov     pc, lr
END(armv7_tlb_flushID_SE)

/* Based on algorithm from ARM Architecture Reference Manual */
ENTRY(armv7_dcache_wbinv_all)
    stmdb   sp!, {r4, r5, r6, r7, r8, r9}

    /* Get cache level */
    ldr     r0, .Lcoherency_level
    ldr     r3, [r0]
    cmp     r3, #0
    beq     Finished
    /* For each cache level */
    mov     r8, #0
Loop1:
    /* Get cache type for given level */
    mov     r2, r8, lsl #2
    add     r2, r2, r2
    ldr     r0, .Lcache_type
    ldr     r1, [r0, r2]

    /* Get line size */
    and     r2, r1, #7
    add     r2, r2, #4

    /* Get number of ways */
    ldr     r4, .Lway_mask
    ands    r4, r4, r1, lsr #3
    clz     r5, r4

    /* Get max index */
    ldr     r7, .Lmax_index
    ands    r7, r7, r1, lsr #13
Loop2:
    mov     r9, r4
Loop3:
    mov     r6, r8, lsl #1
    orr     r6, r6, r9, lsl r5
    orr     r6, r6, r7, lsl r2

    /* Clean and invalidate data cache by way/index */
    mcr     CP15_DCCISW(r6)
    subs    r9, r9, #1
    bge     Loop3
    subs    r7, r7, #1
    bge     Loop2
Skip:
    add     r8, r8, #1
    cmp     r3, r8
    bne     Loop1
Finished:
    dsb
    ldmia   sp!, {r4, r5, r6, r7, r8, r9}
    RET
END(armv7_dcache_wbinv_all)

ENTRY(armv7_idcache_wbinv_all)
    stmdb   sp!, {lr}
    bl      armv7_dcache_wbinv_all
#ifdef SMP
    mcr     CP15_ICIALLUIS
#else
    mcr     CP15_ICIALLU
#endif
    dsb
    isb
    ldmia   sp!, {lr}
    RET
END(armv7_idcache_wbinv_all)

/* XXX Temporarily set to 32 for MV cores; this value should really be
 * read from the Cache Type register.
 */
.Larmv7_line_size:
    .word   32

ENTRY(armv7_dcache_wb_range)
    ldr     ip, .Larmv7_line_size
    sub     r3, ip, #1
    and     r2, r0, r3
    add     r1, r1, r2
    bic     r0, r0, r3
.Larmv7_wb_next:
    mcr     CP15_DCCMVAC(r0)
    add     r0, r0, ip
    subs    r1, r1, ip
    bhi     .Larmv7_wb_next
    dsb                     /* data synchronization barrier */
    RET
END(armv7_dcache_wb_range)

ENTRY(armv7_dcache_wbinv_range)
    ldr     ip, .Larmv7_line_size
    sub     r3, ip, #1
    and     r2, r0, r3
    add     r1, r1, r2
    bic     r0, r0, r3
.Larmv7_wbinv_next:
    mcr     CP15_DCCIMVAC(r0)
    add     r0, r0, ip
    subs    r1, r1, ip
    bhi     .Larmv7_wbinv_next
    dsb                     /* data synchronization barrier */
    RET
END(armv7_dcache_wbinv_range)
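[A minimal C model of the set/way walk performed by armv7_dcache_wbinv_all above, following the ARM ARM algorithm. read_clidr(), read_ccsidr() and dccisw() are hypothetical CP15 accessor stubs, not functions from this tree, and the sketch assumes at least a two-way cache so the clz is well defined.]

#include <stdint.h>

extern uint32_t read_clidr(void);               /* CLIDR: cache level map */
extern uint32_t read_ccsidr(uint32_t csselr);   /* CCSIDR for selected cache */
extern void dccisw(uint32_t setway);            /* DCCISW: clean+inv by set/way */

static void
dcache_wbinv_all_model(void)
{
    uint32_t loc = (read_clidr() >> 24) & 0x7;  /* level of coherency */

    for (uint32_t level = 0; level < loc; level++) {
        uint32_t ccsidr = read_ccsidr(level << 1);
        uint32_t lshift = (ccsidr & 0x7) + 4;       /* log2(line bytes) */
        uint32_t ways = (ccsidr >> 3) & 0x3ff;      /* associativity - 1 */
        uint32_t sets = (ccsidr >> 13) & 0x7fff;    /* number of sets - 1 */
        uint32_t wshift = __builtin_clz(ways);      /* way field position */

        /* Same nesting as Loop2/Loop3: all ways of each set index. */
        for (int32_t set = sets; set >= 0; set--)
            for (int32_t way = ways; way >= 0; way--)
                dccisw((level << 1) | ((uint32_t)way << wshift) |
                    ((uint32_t)set << lshift));
    }
}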
/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 */
ENTRY(armv7_dcache_inv_range)
    ldr     ip, .Larmv7_line_size
    sub     r3, ip, #1
    and     r2, r0, r3
    add     r1, r1, r2
    bic     r0, r0, r3
.Larmv7_inv_next:
    mcr     CP15_DCIMVAC(r0)
    add     r0, r0, ip
    subs    r1, r1, ip
    bhi     .Larmv7_inv_next
    dsb                     /* data synchronization barrier */
    RET
END(armv7_dcache_inv_range)

ENTRY(armv7_idcache_wbinv_range)
    ldr     ip, .Larmv7_line_size
    sub     r3, ip, #1
    and     r2, r0, r3
    add     r1, r1, r2
    bic     r0, r0, r3
.Larmv7_id_wbinv_next:
    mcr     CP15_ICIMVAU(r0)
    mcr     CP15_DCCIMVAC(r0)
    add     r0, r0, ip
    subs    r1, r1, ip
    bhi     .Larmv7_id_wbinv_next
    isb                     /* instruction synchronization barrier */
    dsb                     /* data synchronization barrier */
    RET
END(armv7_idcache_wbinv_range)

ENTRY_NP(armv7_icache_sync_all)
#ifdef SMP
    mcr     CP15_ICIALLUIS
#else
    mcr     CP15_ICIALLU
#endif
    isb                     /* instruction synchronization barrier */
    dsb                     /* data synchronization barrier */
    RET
END(armv7_icache_sync_all)

ENTRY_NP(armv7_icache_sync_range)
    ldr     ip, .Larmv7_line_size
.Larmv7_sync_next:
    mcr     CP15_ICIMVAU(r0)
    mcr     CP15_DCCMVAC(r0)
    add     r0, r0, ip
    subs    r1, r1, ip
    bhi     .Larmv7_sync_next
    isb                     /* instruction synchronization barrier */
    dsb                     /* data synchronization barrier */
    RET
END(armv7_icache_sync_range)

ENTRY(armv7_cpu_sleep)
    dsb                     /* data synchronization barrier */
    wfi                     /* wait for interrupt */
    RET
END(armv7_cpu_sleep)

ENTRY(armv7_context_switch)
    dsb
    orr     r0, r0, #PT_ATTR
    mcr     CP15_TTBR0(r0)
    isb
#ifdef SMP
    mcr     CP15_TLBIALLIS
#else
    mcr     CP15_TLBIALL
#endif
    dsb
    isb
    RET
END(armv7_context_switch)

ENTRY(armv7_drain_writebuf)
    dsb
    RET
END(armv7_drain_writebuf)

ENTRY(armv7_sev)
    dsb
    sev
    nop
    RET
END(armv7_sev)

ENTRY(armv7_auxctrl)
    mrc     CP15_ACTLR(r2)
    bic     r3, r2, r0      /* Clear bits */
    eor     r3, r3, r1      /* XOR bits */

    teq     r2, r3
    mcrne   CP15_ACTLR(r3)
    mov     r0, r2
    RET
END(armv7_auxctrl)

/*
 * Invalidate all I+D+branch cache.  Used by startup code, which counts
 * on the fact that only r0-r3,ip are modified and no stack space is used.
 */
ENTRY(armv7_idcache_inv_all)
    mov     r0, #0
    mcr     CP15_CSSELR(r0)     @ set cache level to L1
    mrc     CP15_CCSIDR(r0)

    ubfx    r2, r0, #13, #15    @ get num sets - 1 from CCSIDR
    ubfx    r3, r0, #3, #10     @ get numways - 1 from CCSIDR
    clz     r1, r3              @ number of bits to MSB of way
    lsl     r3, r3, r1          @ shift into position
    mov     ip, #1
    lsl     ip, ip, r1          @ ip now contains the way decr

    ubfx    r0, r0, #0, #3      @ get linesize from CCSIDR
    add     r0, r0, #4          @ apply bias
    lsl     r2, r2, r0          @ shift sets by log2(linesize)
    add     r3, r3, r2          @ merge numsets - 1 with numways - 1
    sub     ip, ip, r2          @ subtract numsets - 1 from way decr
    mov     r1, #1
    lsl     r1, r1, r0          @ r1 now contains the set decr
    mov     r2, ip              @ r2 now contains set way decr

    /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
1:  mcr     CP15_DCISW(r3)      @ invalidate line
    movs    r0, r3              @ get current way/set
    beq     2f                  @ at 0 means we are done.
    movs    r0, r0, lsl #10     @ clear way bits leaving only set bits
    subne   r3, r3, r1          @ non-zero?, decrement set #
    subeq   r3, r3, r2          @ zero?, decrement way # and restore set count
    b       1b
2:
    dsb                         @ wait for stores to finish
    mov     r0, #0              @ and ...
    mcr     CP15_ICIALLU        @ invalidate instruction+branch cache
    isb                         @ instruction sync barrier
    bx      lr                  @ return
END(armv7_idcache_inv_all)

ENTRY_NP(armv7_sleep)
    dsb
    wfi
    bx      lr
END(armv7_sleep)
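[How armv7_idcache_inv_all picks apart CCSIDR, shown in C. The bit positions are architectural (ARMv7 CCSIDR); the struct and function names are illustrative only.]

#include <stdint.h>

struct ccsidr_geom {
    uint32_t linesize;      /* bytes per cache line */
    uint32_t nways;         /* associativity */
    uint32_t nsets;         /* number of sets */
};

static struct ccsidr_geom
ccsidr_decode(uint32_t ccsidr)
{
    struct ccsidr_geom g;

    /* ubfx r0, r0, #0, #3; add r0, r0, #4 -> log2(line size in bytes) */
    g.linesize = 1u << ((ccsidr & 0x7) + 4);
    /* ubfx r3, r0, #3, #10 -> associativity - 1 */
    g.nways = ((ccsidr >> 3) & 0x3ff) + 1;
    /* ubfx r2, r0, #13, #15 -> number of sets - 1 */
    g.nsets = ((ccsidr >> 13) & 0x7fff) + 1;
    return (g);
}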
Index: stable/10/sys/arm/arm/cpuinfo.c
===================================================================
--- stable/10/sys/arm/arm/cpuinfo.c	(revision 283316)
+++ stable/10/sys/arm/arm/cpuinfo.c	(revision 283317)
@@ -1,121 +1,125 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/cpuinfo.h>
#include <machine/cpu-v6.h>

struct cpuinfo cpuinfo;

/* Read and parse CPU id scheme */
void
cpuinfo_init(void)
{

    cpuinfo.midr = cp15_midr_get();
    /* Test old version id schemes first */
    if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
        if (CPU_ID_ISOLD(cpuinfo.midr)) {
            /* obsolete ARMv2 or ARMv3 CPU */
            cpuinfo.midr = 0;
            return;
        }
        if (CPU_ID_IS7(cpuinfo.midr)) {
            if ((cpuinfo.midr & (1 << 23)) == 0) {
                /* obsolete ARMv3 CPU */
                cpuinfo.midr = 0;
                return;
            }
            /* ARMv4T CPU */
            cpuinfo.architecture = 1;
            cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
-        }
+        } else {
+            /* ARM new id scheme */
+            cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
+            cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
+        }
    } else {
-        /* must be new id scheme */
+        /* non-ARM -> must be new id scheme */
        cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
        cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
    }
    /* Parse rest of MIDR */
    cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
    cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
    cpuinfo.patch = cpuinfo.midr & 0x0F;

    /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
    cpuinfo.ctr = cp15_ctr_get();
    cpuinfo.tcmtr = cp15_tcmtr_get();
    cpuinfo.tlbtr = cp15_tlbtr_get();
    cpuinfo.mpidr = cp15_mpidr_get();
    cpuinfo.revidr = cp15_revidr_get();

    /* if CPU is not v7 cpu id scheme */
    if (cpuinfo.architecture != 0xF)
        return;

    cpuinfo.id_pfr0 = cp15_id_pfr0_get();
    cpuinfo.id_pfr1 = cp15_id_pfr1_get();
    cpuinfo.id_dfr0 = cp15_id_dfr0_get();
    cpuinfo.id_afr0 = cp15_id_afr0_get();
    cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
    cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
    cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
    cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
    cpuinfo.id_isar0 = cp15_id_isar0_get();
    cpuinfo.id_isar1 = cp15_id_isar1_get();
    cpuinfo.id_isar2 = cp15_id_isar2_get();
    cpuinfo.id_isar3 = cp15_id_isar3_get();
    cpuinfo.id_isar4 = cp15_id_isar4_get();
    cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs
    cpuinfo.cbar = cp15_cbar_get();
*/

    /* Test if revidr is implemented */
    if (cpuinfo.revidr == cpuinfo.midr)
        cpuinfo.revidr = 0;

    /* parsed bits of above registers */
    /* id_mmfr0 */
    cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
    cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
    cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
    cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
    /* id_mmfr2 */
    cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
    /* id_mmfr3 */
    cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
    cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
    /* id_pfr1 */
    cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
    cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
    cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
}
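[A worked example of the MIDR field extraction done in cpuinfo_init() above, using 0x413fc090 (a Cortex-A9 r3p0) as sample input. The shifts and masks mirror the code; the standalone program is illustrative only.]

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t midr = 0x413fc090;

    printf("implementer  0x%02x\n", (midr >> 24) & 0xff); /* 0x41 'A' = ARM Ltd */
    printf("architecture 0x%x\n", (midr >> 16) & 0x0f);   /* 0xf = CPUID scheme */
    printf("revision     %u\n", (midr >> 20) & 0x0f);     /* variant: 3 */
    printf("part number  0x%03x\n", (midr >> 4) & 0xfff); /* 0xc09 = Cortex-A9 */
    printf("patch        %u\n", midr & 0x0f);             /* 0 -> r3p0 */
    return (0);
}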
Index: stable/10/sys/arm/include/atomic.h
===================================================================
--- stable/10/sys/arm/include/atomic.h	(revision 283316)
+++ stable/10/sys/arm/include/atomic.h	(revision 283317)
@@ -1,1134 +1,1134 @@
/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#include <sys/types.h>
#include <machine/armreg.h>

#ifndef _KERNEL
#include <machine/sysarch.h>
#else
#include <machine/cpuconf.h>
#endif

#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
#define isb()  __asm __volatile("isb" : : : "memory")
#define dsb()  __asm __volatile("dsb" : : : "memory")
#define dmb()  __asm __volatile("dmb" : : : "memory")
#elif defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) || \
  defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6T2__) || \
  defined (__ARM_ARCH_6Z__) || defined (__ARM_ARCH_6ZK__)
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#else
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  dsb()
#endif

#define mb()   dmb()
#define wmb()  dmb()
#define rmb()  dmb()

/*
 * It would be nice to use _HAVE_ARMv6_INSTRUCTIONS from machine/asm.h
 * here, but that header can't be included here because this is C
 * code.  I would like to move the _HAVE_ARMv6_INSTRUCTIONS definition
 * out of asm.h so it can be used in both asm and C code. - kientzle@
 */
#if defined (__ARM_ARCH_7__) || \
    defined (__ARM_ARCH_7A__) || \
    defined (__ARM_ARCH_6__) || \
    defined (__ARM_ARCH_6J__) || \
    defined (__ARM_ARCH_6K__) || \
    defined (__ARM_ARCH_6T2__) || \
    defined (__ARM_ARCH_6Z__) || \
    defined (__ARM_ARCH_6ZK__)
#define ARM_HAVE_ATOMIC64

static __inline void
__do_dmb(void)
{

#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
    __asm __volatile("dmb" : : : "memory");
#else
    __asm __volatile("mcr p15, 0, r0, c7, c10, 5" : : : "memory");
#endif
}

#define ATOMIC_ACQ_REL_LONG(NAME)                                       \
static __inline void                                                    \
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)                \
{                                                                       \
    atomic_##NAME##_long(p, v);                                         \
    __do_dmb();                                                         \
}                                                                       \
                                                                        \
static __inline void                                                    \
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)                \
{                                                                       \
    __do_dmb();                                                         \
    atomic_##NAME##_long(p, v);                                         \
}

#define ATOMIC_ACQ_REL(NAME, WIDTH)                                     \
static __inline void                                                    \
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
    atomic_##NAME##_##WIDTH(p, v);                                      \
    __do_dmb();                                                         \
}                                                                       \
                                                                        \
static __inline void                                                    \
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
    __do_dmb();                                                         \
    atomic_##NAME##_##WIDTH(p, v);                                      \
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
    uint32_t tmp = 0, tmp2 = 0;

    __asm __volatile("1: ldrex %0, [%2]\n"
                     "orr %0, %0, %3\n"
                     "strex %1, %0, [%2]\n"
                     "cmp %1, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
                     : : "cc", "memory");
}

static __inline void
atomic_set_64(volatile uint64_t *p, uint64_t val)
{
    uint64_t tmp;
    uint32_t exflag;

    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[tmp], [%[ptr]]\n"
        "   orr      %Q[tmp], %Q[val]\n"
        "   orr      %R[tmp], %R[val]\n"
        "   strexd   %[exf], %[tmp], [%[ptr]]\n"
        "   teq      %[exf], #0\n"
        "   it ne    \n"
        "   bne      1b\n"
        : [exf] "=&r" (exflag), [tmp] "=&r" (tmp)
        : [ptr] "r" (p), [val] "r" (val)
        : "cc", "memory");
}

static __inline void
atomic_set_long(volatile u_long *address, u_long setmask)
{
    u_long tmp = 0, tmp2 = 0;

    __asm __volatile("1: ldrex %0, [%2]\n"
                     "orr %0, %0, %3\n"
                     "strex %1, %0, [%2]\n"
                     "cmp %1, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
                     : : "cc", "memory");
}
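[A sketch of how the dmb()-based acquire/release variants defined via these macros are meant to be used: one CPU publishes a payload, another consumes it, and the barriers in atomic_store_rel_32()/atomic_load_acq_32() order the flag relative to the data. shared_data and ready are illustrative globals, not part of this header.]

static volatile uint32_t shared_data;
static volatile uint32_t ready;

static void
producer(uint32_t v)
{
    shared_data = v;                /* payload first... */
    atomic_store_rel_32(&ready, 1); /* ...dmb, then set the flag */
}

static uint32_t
consumer(void)
{
    while (atomic_load_acq_32(&ready) == 0)  /* read flag, then dmb */
        ;
    return (shared_data);           /* payload is now safely visible */
}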
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
{
    uint32_t tmp = 0, tmp2 = 0;

    __asm __volatile("1: ldrex %0, [%2]\n"
                     "bic %0, %0, %3\n"
                     "strex %1, %0, [%2]\n"
                     "cmp %1, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
                     : : "cc", "memory");
}

static __inline void
atomic_clear_64(volatile uint64_t *p, uint64_t val)
{
    uint64_t tmp;
    uint32_t exflag;

    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[tmp], [%[ptr]]\n"
        "   bic      %Q[tmp], %Q[val]\n"
        "   bic      %R[tmp], %R[val]\n"
        "   strexd   %[exf], %[tmp], [%[ptr]]\n"
        "   teq      %[exf], #0\n"
        "   it ne    \n"
        "   bne      1b\n"
        : [exf] "=&r" (exflag), [tmp] "=&r" (tmp)
        : [ptr] "r" (p), [val] "r" (val)
        : "cc", "memory");
}

static __inline void
atomic_clear_long(volatile u_long *address, u_long setmask)
{
    u_long tmp = 0, tmp2 = 0;

    __asm __volatile("1: ldrex %0, [%2]\n"
                     "bic %0, %0, %3\n"
                     "strex %1, %0, [%2]\n"
                     "cmp %1, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
                     : : "cc", "memory");
}

static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
    uint32_t ret;

    __asm __volatile("1: ldrex %0, [%1]\n"
                     "cmp %0, %2\n"
                     "itt ne\n"
                     "movne %0, #0\n"
                     "bne 2f\n"
                     "strex %0, %3, [%1]\n"
                     "cmp %0, #0\n"
                     "ite eq\n"
                     "moveq %0, #1\n"
                     "bne 1b\n"
                     "2:"
                     : "=&r" (ret), "+r" (p), "+r" (cmpval), "+r" (newval)
                     : : "cc", "memory");
    return (ret);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
    uint64_t tmp;
    uint32_t ret;

    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[tmp], [%[ptr]]\n"
        "   teq      %Q[tmp], %Q[cmpval]\n"
        "   itee eq  \n"
        "   teqeq    %R[tmp], %R[cmpval]\n"
        "   movne    %[ret], #0\n"
        "   bne      2f\n"
        "   strexd   %[ret], %[newval], [%[ptr]]\n"
        "   teq      %[ret], #0\n"
        "   it ne    \n"
        "   bne      1b\n"
        "   mov      %[ret], #1\n"
        "2:          \n"
        : [ret] "=&r" (ret), [tmp] "=&r" (tmp)
        : [ptr] "r" (p), [cmpval] "r" (cmpval), [newval] "r" (newval)
        : "cc", "memory");
    return (ret);
}

static __inline u_long
atomic_cmpset_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{
    u_long ret;

    __asm __volatile("1: ldrex %0, [%1]\n"
                     "cmp %0, %2\n"
                     "itt ne\n"
                     "movne %0, #0\n"
                     "bne 2f\n"
                     "strex %0, %3, [%1]\n"
                     "cmp %0, #0\n"
                     "ite eq\n"
                     "moveq %0, #1\n"
                     "bne 1b\n"
                     "2:"
                     : "=&r" (ret), "+r" (p), "+r" (cmpval), "+r" (newval)
                     : : "cc", "memory");
    return (ret);
}

static __inline u_int32_t
atomic_cmpset_acq_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
    u_int32_t ret = atomic_cmpset_32(p, cmpval, newval);

    __do_dmb();
    return (ret);
}

static __inline uint64_t
atomic_cmpset_acq_64(volatile uint64_t *p, volatile uint64_t cmpval, volatile uint64_t newval)
{
    uint64_t ret = atomic_cmpset_64(p, cmpval, newval);

    __do_dmb();
    return (ret);
}

static __inline u_long
atomic_cmpset_acq_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{
    u_long ret = atomic_cmpset_long(p, cmpval, newval);

    __do_dmb();
    return (ret);
}

static __inline u_int32_t
atomic_cmpset_rel_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{

    __do_dmb();
    return (atomic_cmpset_32(p, cmpval, newval));
}

static __inline uint64_t
atomic_cmpset_rel_64(volatile uint64_t *p, volatile uint64_t cmpval, volatile uint64_t newval)
{

    __do_dmb();
    return (atomic_cmpset_64(p, cmpval, newval));
}

static __inline u_long
atomic_cmpset_rel_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{

    __do_dmb();
    return (atomic_cmpset_long(p, cmpval, newval));
}
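[A minimal spinlock sketch built on the cmpset primitives above, showing where the acquire and release barriers matter. Illustrative only; the kernel's real locks live elsewhere.]

static volatile uint32_t lock_word;     /* 0 = free, 1 = held */

static void
toy_lock(void)
{
    /* cmpset_acq: the dmb after a successful swap keeps the critical
     * section from being reordered before lock acquisition. */
    while (atomic_cmpset_acq_32(&lock_word, 0, 1) == 0)
        ;
}

static void
toy_unlock(void)
{
    /* store_rel: the dmb before the store makes all writes in the
     * critical section visible before the lock is seen as free. */
    atomic_store_rel_32(&lock_word, 0);
}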
"add %0, %0, %3\n" "strex %1, %0, [%2]\n" "cmp %1, #0\n" "it ne\n" "bne 1b\n" : "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); } static __inline void atomic_add_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %[tmp], [%[ptr]]\n" " adds %Q[tmp], %Q[val]\n" " adc %R[tmp], %R[val]\n" " strexd %[exf], %[tmp], [%[ptr]]\n" " teq %[exf], #0\n" " it ne \n" " bne 1b\n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_add_long(volatile u_long *p, u_long val) { u_long tmp = 0, tmp2 = 0; __asm __volatile("1: ldrex %0, [%2]\n" "add %0, %0, %3\n" "strex %1, %0, [%2]\n" "cmp %1, #0\n" "it ne\n" "bne 1b\n" : "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); } static __inline void atomic_subtract_32(volatile u_int32_t *p, u_int32_t val) { uint32_t tmp = 0, tmp2 = 0; __asm __volatile("1: ldrex %0, [%2]\n" "sub %0, %0, %3\n" "strex %1, %0, [%2]\n" "cmp %1, #0\n" "it ne\n" "bne 1b\n" : "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); } static __inline void atomic_subtract_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %[tmp], [%[ptr]]\n" " subs %Q[tmp], %Q[val]\n" " sbc %R[tmp], %R[val]\n" " strexd %[exf], %[tmp], [%[ptr]]\n" " teq %[exf], #0\n" " it ne \n" " bne 1b\n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_subtract_long(volatile u_long *p, u_long val) { u_long tmp = 0, tmp2 = 0; __asm __volatile("1: ldrex %0, [%2]\n" "sub %0, %0, %3\n" "strex %1, %0, [%2]\n" "cmp %1, #0\n" "it ne\n" "bne 1b\n" : "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); } ATOMIC_ACQ_REL(clear, 32) ATOMIC_ACQ_REL(add, 32) ATOMIC_ACQ_REL(subtract, 32) ATOMIC_ACQ_REL(set, 32) ATOMIC_ACQ_REL(clear, 64) ATOMIC_ACQ_REL(add, 64) ATOMIC_ACQ_REL(subtract, 64) ATOMIC_ACQ_REL(set, 64) ATOMIC_ACQ_REL_LONG(clear) ATOMIC_ACQ_REL_LONG(add) ATOMIC_ACQ_REL_LONG(subtract) ATOMIC_ACQ_REL_LONG(set) #undef ATOMIC_ACQ_REL #undef ATOMIC_ACQ_REL_LONG static __inline uint32_t atomic_fetchadd_32(volatile uint32_t *p, uint32_t val) { uint32_t tmp = 0, tmp2 = 0, ret = 0; __asm __volatile("1: ldrex %0, [%3]\n" "add %1, %0, %4\n" "strex %2, %1, [%3]\n" "cmp %2, #0\n" "it ne\n" "bne 1b\n" : "+r" (ret), "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); return (ret); } static __inline uint32_t atomic_readandclear_32(volatile u_int32_t *p) { uint32_t ret, tmp = 0, tmp2 = 0; __asm __volatile("1: ldrex %0, [%3]\n" "mov %1, #0\n" "strex %2, %1, [%3]\n" "cmp %2, #0\n" "it ne\n" "bne 1b\n" : "=r" (ret), "=&r" (tmp), "+r" (tmp2) ,"+r" (p) : : "cc", "memory"); return (ret); } static __inline uint32_t atomic_load_acq_32(volatile uint32_t *p) { uint32_t v; v = *p; __do_dmb(); return (v); } static __inline void atomic_store_rel_32(volatile uint32_t *p, uint32_t v) { __do_dmb(); *p = v; } static __inline uint64_t atomic_fetchadd_64(volatile uint64_t *p, uint64_t val) { uint64_t ret, tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %[ret], [%[ptr]]\n" " adds %Q[tmp], %Q[ret], %Q[val]\n" " adc %R[tmp], %R[ret], %R[val]\n" " strexd %[exf], %[tmp], [%[ptr]]\n" " teq %[exf], #0\n" " it ne \n" " bne 1b\n" : [ret] "=&r" (ret), [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); return (ret); } static __inline uint64_t atomic_readandclear_64(volatile uint64_t *p) { 
static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
    uint64_t ret, tmp;
    uint32_t exflag;

    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[ret], [%[ptr]]\n"
        "   mov      %Q[tmp], #0\n"
        "   mov      %R[tmp], #0\n"
        "   strexd   %[exf], %[tmp], [%[ptr]]\n"
        "   teq      %[exf], #0\n"
        "   it ne    \n"
        "   bne      1b\n"
        : [ret] "=&r" (ret), [exf] "=&r" (exflag), [tmp] "=&r" (tmp)
        : [ptr] "r" (p)
        : "cc", "memory");
    return (ret);
}

static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
    uint64_t ret;

    /*
     * The only way to atomically load 64 bits is with LDREXD which puts the
-    * exclusive monitor into the open state, so reset it with CLREX because
-    * we don't actually need to store anything.
+    * exclusive monitor into the exclusive state, so reset it to open state
+    * with CLREX because we don't actually need to store anything.
     */
    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[ret], [%[ptr]]\n"
        "   clrex    \n"
        : [ret] "=&r" (ret)
        : [ptr] "r" (p)
        : "cc", "memory");
    return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
    uint64_t ret;

    ret = atomic_load_64(p);
    __do_dmb();
    return (ret);
}

static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t val)
{
    uint64_t tmp;
    uint32_t exflag;

    /*
     * The only way to atomically store 64 bits is with STREXD, which will
     * succeed only if paired up with a preceding LDREXD using the same
     * address, so we read and discard the existing value before storing.
     */
    __asm __volatile(
        "1:          \n"
        "   ldrexd   %[tmp], [%[ptr]]\n"
        "   strexd   %[exf], %[val], [%[ptr]]\n"
        "   teq      %[exf], #0\n"
        "   it ne    \n"
        "   bne      1b\n"
        : [tmp] "=&r" (tmp), [exf] "=&r" (exflag)
        : [ptr] "r" (p), [val] "r" (val)
        : "cc", "memory");
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

    __do_dmb();
    atomic_store_64(p, val);
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long val)
{
    u_long tmp = 0, tmp2 = 0, ret = 0;

    __asm __volatile("1: ldrex %0, [%3]\n"
                     "add %1, %0, %4\n"
                     "strex %2, %1, [%3]\n"
                     "cmp %2, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)
                     : : "cc", "memory");
    return (ret);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{
    u_long ret, tmp = 0, tmp2 = 0;

    __asm __volatile("1: ldrex %0, [%3]\n"
                     "mov %1, #0\n"
                     "strex %2, %1, [%3]\n"
                     "cmp %2, #0\n"
                     "it ne\n"
                     "bne 1b\n"
                     : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p)
                     : : "cc", "memory");
    return (ret);
}

static __inline u_long
atomic_load_acq_long(volatile u_long *p)
{
    u_long v;

    v = *p;
    __do_dmb();
    return (v);
}

static __inline void
atomic_store_rel_long(volatile u_long *p, u_long v)
{

    __do_dmb();
    *p = v;
}
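[Because of the LDREXD/STREXD pairing above, a 64-bit store always reads the old value first, and a 64-bit load ends with clrex; a caller just sees an ordinary atomic 64-bit cell even on 32-bit ARM. A small illustrative sketch:]

static volatile uint64_t stamp64;

static void
update_stamp(uint64_t v)
{
    atomic_store_64(&stamp64, v);       /* never tears across the two words */
}

static uint64_t
read_stamp(void)
{
    return (atomic_load_64(&stamp64));  /* both halves from one instant */
}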
#else /* < armv6 */

#define __with_interrupts_disabled(expr) \
    do {                                          \
        u_int cpsr_save, tmp;                     \
                                                  \
        __asm __volatile(                         \
            "mrs  %0, cpsr;"                      \
            "orr  %1, %0, %2;"                    \
            "msr  cpsr_fsxc, %1;"                 \
            : "=r" (cpsr_save), "=r" (tmp)        \
            : "I" (PSR_I | PSR_F)                 \
            : "cc" );                             \
        (expr);                                   \
        __asm __volatile(                         \
            "msr  cpsr_fsxc, %0"                  \
            : /* no output */                     \
            : "r" (cpsr_save)                     \
            : "cc" );                             \
    } while(0)

static __inline uint32_t
__swp(uint32_t val, volatile uint32_t *ptr)
{

    __asm __volatile("swp %0, %2, [%3]"
                     : "=&r" (val), "=m" (*ptr)
                     : "r" (val), "r" (ptr), "m" (*ptr)
                     : "memory");
    return (val);
}

#ifdef _KERNEL
#define ARM_HAVE_ATOMIC64

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{

    __with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_set_64(volatile uint64_t *address, uint64_t setmask)
{

    __with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{

    __with_interrupts_disabled(*address &= ~clearmask);
}

static __inline void
atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
{

    __with_interrupts_disabled(*address &= ~clearmask);
}

static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
    int ret;

    __with_interrupts_disabled(
    {
        if (*p == cmpval) {
            *p = newval;
            ret = 1;
        } else {
            ret = 0;
        }
    });
    return (ret);
}

static __inline u_int64_t
atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
{
    int ret;

    __with_interrupts_disabled(
    {
        if (*p == cmpval) {
            *p = newval;
            ret = 1;
        } else {
            ret = 0;
        }
    });
    return (ret);
}

static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{

    __with_interrupts_disabled(*p += val);
}

static __inline void
atomic_add_64(volatile u_int64_t *p, u_int64_t val)
{

    __with_interrupts_disabled(*p += val);
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{

    __with_interrupts_disabled(*p -= val);
}

static __inline void
atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
{

    __with_interrupts_disabled(*p -= val);
}

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
    uint32_t value;

    __with_interrupts_disabled(
    {
        value = *p;
        *p += v;
    });
    return (value);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
    uint64_t value;

    __with_interrupts_disabled(
    {
        value = *p;
        *p += v;
    });
    return (value);
}

static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
    uint64_t value;

    __with_interrupts_disabled(value = *p);
    return (value);
}

static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t value)
{

    __with_interrupts_disabled(*p = value);
}

#else /* !_KERNEL */

static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
    register int done, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%2]\n"
                     "cmp %1, %3\n"
                     "streq %4, [%2]\n"
                     "2:\n"
                     "mov %1, #0\n"
                     "str %1, [%0]\n"
                     "mov %1, #0xffffffff\n"
                     "str %1, [%0, #4]\n"
                     "moveq %1, #1\n"
                     "movne %1, #0\n"
                     : "+r" (ras_start), "=r" (done),
                       "+r" (p), "+r" (cmpval), "+r" (newval)
                     : : "cc", "memory");
    return (done);
}

static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
    int start, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%2]\n"
                     "add %1, %1, %3\n"
                     "str %1, [%2]\n"
                     "2:\n"
                     "mov %1, #0\n"
                     "str %1, [%0]\n"
                     "mov %1, #0xffffffff\n"
                     "str %1, [%0, #4]\n"
                     : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
                     : : "memory");
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
    int start, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%2]\n"
                     "sub %1, %1, %3\n"
                     "str %1, [%2]\n"
                     "2:\n"
                     "mov %1, #0\n"
                     "str %1, [%0]\n"
                     "mov %1, #0xffffffff\n"
                     "str %1, [%0, #4]\n"
                     : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
                     : : "memory");
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
    int start, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%2]\n"
                     "orr %1, %1, %3\n"
                     "str %1, [%2]\n"
                     "2:\n"
                     "mov %1, #0\n"
                     "str %1, [%0]\n"
                     "mov %1, #0xffffffff\n"
                     "str %1, [%0, #4]\n"
                     : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask)
                     : : "memory");
}
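[A sketch of the classic read/compute/cmpset retry loop, the usual way to build operations this header does not provide (here: an atomic max). It works on both the ldrex/strex and the RAS implementations of atomic_cmpset_32(); the function name is illustrative only.]

static __inline void
atomic_max_32_sketch(volatile uint32_t *p, uint32_t v)
{
    uint32_t old;

    do {
        old = *p;
        if (old >= v)
            return;             /* nothing to do */
    } while (atomic_cmpset_32(p, old, v) == 0);  /* retry if we raced */
}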
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
    int start, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%2]\n"
                     "bic %1, %1, %3\n"
                     "str %1, [%2]\n"
                     "2:\n"
                     "mov %1, #0\n"
                     "str %1, [%0]\n"
                     "mov %1, #0xffffffff\n"
                     "str %1, [%0, #4]\n"
                     : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask)
                     : : "memory");
}

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
    uint32_t start, tmp, ras_start = ARM_RAS_START;

    __asm __volatile("1:\n"
                     "adr %1, 1b\n"
                     "str %1, [%0]\n"
                     "adr %1, 2f\n"
                     "str %1, [%0, #4]\n"
                     "ldr %1, [%3]\n"
                     "mov %2, %1\n"
                     "add %2, %2, %4\n"
                     "str %2, [%3]\n"
                     "2:\n"
                     "mov %2, #0\n"
                     "str %2, [%0]\n"
                     "mov %2, #0xffffffff\n"
                     "str %2, [%0, #4]\n"
                     : "+r" (ras_start), "=r" (start), "=r" (tmp), "+r" (p), "+r" (v)
                     : : "memory");
    return (start);
}

#endif /* _KERNEL */

static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{

    return (__swp(0, p));
}

#define atomic_cmpset_rel_32    atomic_cmpset_32
#define atomic_cmpset_acq_32    atomic_cmpset_32
#define atomic_set_rel_32       atomic_set_32
#define atomic_set_acq_32       atomic_set_32
#define atomic_clear_rel_32     atomic_clear_32
#define atomic_clear_acq_32     atomic_clear_32
#define atomic_add_rel_32       atomic_add_32
#define atomic_add_acq_32       atomic_add_32
#define atomic_subtract_rel_32  atomic_subtract_32
#define atomic_subtract_acq_32  atomic_subtract_32
#define atomic_store_rel_32     atomic_store_32
#define atomic_store_rel_long   atomic_store_long
#define atomic_load_acq_32      atomic_load_32
#define atomic_load_acq_long    atomic_load_long
#define atomic_add_acq_long     atomic_add_long
#define atomic_add_rel_long     atomic_add_long
#define atomic_subtract_acq_long    atomic_subtract_long
#define atomic_subtract_rel_long    atomic_subtract_long
#define atomic_clear_acq_long   atomic_clear_long
#define atomic_clear_rel_long   atomic_clear_long
#define atomic_set_acq_long     atomic_set_long
#define atomic_set_rel_long     atomic_set_long
#define atomic_cmpset_acq_long  atomic_cmpset_long
#define atomic_cmpset_rel_long  atomic_cmpset_long
#define atomic_load_acq_long    atomic_load_long
#undef __with_interrupts_disabled

static __inline void
atomic_add_long(volatile u_long *p, u_long v)
{

    atomic_add_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{

    atomic_clear_32((volatile uint32_t *)p, v);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
{

    return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

    return (atomic_fetchadd_32((volatile uint32_t *)p, v));
}

static __inline void
atomic_readandclear_long(volatile u_long *p)
{

    atomic_readandclear_32((volatile uint32_t *)p);
}

static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{

    atomic_set_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_subtract_long(volatile u_long *p, u_long v)
{

    atomic_subtract_32((volatile uint32_t *)p, v);
}

#endif /* Arch >= v6 */

static __inline int
atomic_load_32(volatile uint32_t *v)
{

    return (*v);
}

static __inline void
atomic_store_32(volatile uint32_t *dst, uint32_t src)
{

    *dst = src;
}

static __inline int
atomic_load_long(volatile u_long *v)
{

    return (*v);
}

static __inline void
atomic_store_long(volatile u_long *dst, u_long src)
{

    *dst = src;
}

#define atomic_clear_ptr        atomic_clear_32
#define atomic_set_ptr          atomic_set_32
#define atomic_cmpset_ptr       atomic_cmpset_32
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_32
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_32
#define atomic_store_ptr        atomic_store_32
#define atomic_store_rel_ptr    atomic_store_rel_32

#define atomic_add_int          atomic_add_32
#define atomic_add_acq_int      atomic_add_acq_32
#define atomic_add_rel_int      atomic_add_rel_32
#define atomic_subtract_int     atomic_subtract_32
#define atomic_subtract_acq_int atomic_subtract_acq_32
#define atomic_subtract_rel_int atomic_subtract_rel_32
#define atomic_clear_int        atomic_clear_32
#define atomic_clear_acq_int    atomic_clear_acq_32
#define atomic_clear_rel_int    atomic_clear_rel_32
#define atomic_set_int          atomic_set_32
#define atomic_set_acq_int      atomic_set_acq_32
#define atomic_set_rel_int      atomic_set_rel_32
#define atomic_cmpset_int       atomic_cmpset_32
#define atomic_cmpset_acq_int   atomic_cmpset_acq_32
#define atomic_cmpset_rel_int   atomic_cmpset_rel_32
#define atomic_fetchadd_int     atomic_fetchadd_32
#define atomic_readandclear_int atomic_readandclear_32
#define atomic_load_acq_int     atomic_load_acq_32
#define atomic_store_rel_int    atomic_store_rel_32

#endif /* _MACHINE_ATOMIC_H_ */

Index: stable/10
===================================================================
--- stable/10	(revision 283316)
+++ stable/10	(revision 283317)

Property changes on: stable/10
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r278770,279114,279215,279338,279543