Index: sys/arm/include/atomic-v4.h =================================================================== --- sys/arm/include/atomic-v4.h +++ sys/arm/include/atomic-v4.h @@ -521,8 +521,10 @@ #define atomic_fcmpset_rel_32 atomic_fcmpset_32 #define atomic_fcmpset_acq_32 atomic_fcmpset_32 #ifdef _KERNEL +#define atomic_fcmpset_8 atomic_fcmpset_8 #define atomic_fcmpset_rel_8 atomic_fcmpset_8 #define atomic_fcmpset_acq_8 atomic_fcmpset_8 +#define atomic_fcmpset_16 atomic_fcmpset_16 #define atomic_fcmpset_rel_16 atomic_fcmpset_16 #define atomic_fcmpset_acq_16 atomic_fcmpset_16 #define atomic_fcmpset_rel_64 atomic_fcmpset_64 @@ -533,8 +535,10 @@ #define atomic_cmpset_rel_32 atomic_cmpset_32 #define atomic_cmpset_acq_32 atomic_cmpset_32 #ifdef _KERNEL +#define atomic_cmpset_8 atomic_cmpset_8 #define atomic_cmpset_rel_8 atomic_cmpset_8 #define atomic_cmpset_acq_8 atomic_cmpset_8 +#define atomic_cmpset_16 atomic_cmpset_16 #define atomic_cmpset_rel_16 atomic_cmpset_16 #define atomic_cmpset_acq_16 atomic_cmpset_16 #define atomic_cmpset_rel_64 atomic_cmpset_64 Index: sys/arm/include/atomic-v6.h =================================================================== --- sys/arm/include/atomic-v6.h +++ sys/arm/include/atomic-v6.h @@ -245,6 +245,7 @@ ATOMIC_FCMPSET_CODE(ret, uint8_t, "b"); return (ret); } +#define atomic_fcmpset_8 atomic_fcmpset_8 static __inline int atomic_fcmpset_acq_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new) @@ -274,6 +275,7 @@ ATOMIC_FCMPSET_CODE(ret, uint16_t, "h"); return (ret); } +#define atomic_fcmpset_16 atomic_fcmpset_16 static __inline int atomic_fcmpset_acq_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new) @@ -429,6 +431,7 @@ ATOMIC_CMPSET_CODE(ret, "b"); return (ret); } +#define atomic_cmpset_8 atomic_cmpset_8 static __inline int atomic_cmpset_acq_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new) @@ -458,6 +461,7 @@ ATOMIC_CMPSET_CODE(ret, "h"); return (ret); } +#define atomic_cmpset_16 atomic_cmpset_16 static __inline int 
atomic_cmpset_acq_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new) @@ -890,6 +894,7 @@ return (atomic_testandset_32((volatile uint32_t *)p, v)); } +#define atomic_testandset_long atomic_testandset_long static __inline int atomic_testandset_64(volatile uint64_t *p, u_int v) Index: sys/arm/include/atomic.h =================================================================== --- sys/arm/include/atomic.h +++ sys/arm/include/atomic.h @@ -103,4 +103,6 @@ #define atomic_store_rel_int atomic_store_rel_32 #define atomic_swap_int atomic_swap_32 +#include <sys/_atomic_subword.h> + #endif /* _MACHINE_ATOMIC_H_ */ Index: sys/kern/subr_csan.c =================================================================== --- sys/kern/subr_csan.c +++ sys/kern/subr_csan.c @@ -540,7 +540,7 @@ CSAN_ATOMIC_FUNC_CLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_CMPSET(8, uint8_t) CSAN_ATOMIC_FUNC_FCMPSET(8, uint8_t) -_CSAN_ATOMIC_FUNC_LOAD(8, uint8_t) +CSAN_ATOMIC_FUNC_LOAD(8, uint8_t) CSAN_ATOMIC_FUNC_SET(8, uint8_t) CSAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t) _CSAN_ATOMIC_FUNC_STORE(8, uint8_t) @@ -556,11 +556,7 @@ CSAN_ATOMIC_FUNC_CLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_CMPSET(16, uint16_t) CSAN_ATOMIC_FUNC_FCMPSET(16, uint16_t) -#if defined(__aarch64__) -_CSAN_ATOMIC_FUNC_LOAD(16, uint16_t) -#else CSAN_ATOMIC_FUNC_LOAD(16, uint16_t) -#endif CSAN_ATOMIC_FUNC_SET(16, uint16_t) CSAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t) _CSAN_ATOMIC_FUNC_STORE(16, uint16_t) Index: sys/sys/_atomic_subword.h =================================================================== --- sys/sys/_atomic_subword.h +++ sys/sys/_atomic_subword.h @@ -41,6 +41,9 @@ #endif #include <machine/endian.h> +#ifndef _KERNEL +#include <stdbool.h> +#endif #ifndef NBBY #define NBBY 8 @@ -113,6 +116,7 @@ } #endif +#ifndef atomic_cmpset_8 static __inline int atomic_cmpset_8(__volatile uint8_t *addr, uint8_t old, uint8_t val) { @@ -123,7 +127,9 @@ return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr), old << shift, val << shift, 0xff << shift)); } +#endif +#ifndef atomic_fcmpset_8 static __inline int
atomic_fcmpset_8(__volatile uint8_t *addr, uint8_t *old, uint8_t val) { @@ -138,7 +144,9 @@ *old = (wold >> shift) & 0xff; return (ret); } +#endif +#ifndef atomic_cmpset_16 static __inline int atomic_cmpset_16(__volatile uint16_t *addr, uint16_t old, uint16_t val) { @@ -149,7 +157,9 @@ return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr), old << shift, val << shift, 0xffff << shift)); } +#endif +#ifndef atomic_fcmpset_16 static __inline int atomic_fcmpset_16(__volatile uint16_t *addr, uint16_t *old, uint16_t val) { @@ -164,9 +174,83 @@ *old = (wold >> shift) & 0xffff; return (ret); } +#endif + +#ifndef atomic_load_acq_8 +static __inline uint8_t +atomic_load_acq_8(volatile uint8_t *p) +{ + int shift; + uint8_t ret; + + shift = _ATOMIC_BYTE_SHIFT(p); + ret = (atomic_load_acq_32(_ATOMIC_WORD_ALIGNED(p)) >> shift) & 0xff; + return (ret); +} +#endif + +#ifndef atomic_load_acq_16 +static __inline uint16_t +atomic_load_acq_16(volatile uint16_t *p) +{ + int shift; + uint16_t ret; + + shift = _ATOMIC_HWORD_SHIFT(p); + ret = (atomic_load_acq_32(_ATOMIC_WORD_ALIGNED(p)) >> shift) & + 0xffff; + return (ret); +} +#endif #undef _ATOMIC_WORD_ALIGNED #undef _ATOMIC_BYTE_SHIFT #undef _ATOMIC_HWORD_SHIFT +/* + * Provide generic testandset_long implementation based on fcmpset long + * primitive. It may not be ideal for any given arch, so machine/atomic.h + * should define the macro atomic_testandset_long to override with an + * MD-specific version. + * + * (Organizationally, this isn't really subword atomics. But atomic_common is + * included too early in machine/atomic.h, so it isn't a good place for derived + * primitives like this.) 
+ */ +#ifndef atomic_testandset_long +static __inline int +atomic_testandset_long(volatile u_long *p, u_int v) +{ + u_long bit, old; + bool ret; + + bit = (1ul << (v % (sizeof(*p) * NBBY))); + + old = atomic_load_acq_long(p); + ret = false; + while (!ret && (old & bit) == 0) + ret = atomic_fcmpset_acq_long(p, &old, old | bit); + + return (!ret); +} +#endif + +#ifndef atomic_testandclear_long +static __inline int +atomic_testandclear_long(volatile u_long *p, u_int v) +{ + u_long bit, old; + bool ret; + + bit = (1ul << (v % (sizeof(*p) * NBBY))); + + old = atomic_load_acq_long(p); + ret = false; + while (!ret && (old & bit) != 0) + ret = atomic_fcmpset_acq_long(p, &old, old & ~bit); + + return (ret); +} +#endif + #endif /* _SYS__ATOMIC_SUBWORD_H_ */