Index: sys/sys/atomic_common.h
===================================================================
--- sys/sys/atomic_common.h
+++ sys/sys/atomic_common.h
@@ -36,38 +36,97 @@
 #error do not include this header, use machine/atomic.h
 #endif
 
-#define	atomic_load_char(p)	(*(volatile u_char *)(p))
-#define	atomic_load_short(p)	(*(volatile u_short *)(p))
-#define	atomic_load_int(p)	(*(volatile u_int *)(p))
-#define	atomic_load_long(p)	(*(volatile u_long *)(p))
-#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
-#define	atomic_load_8(p)	(*(volatile uint8_t *)(p))
-#define	atomic_load_16(p)	(*(volatile uint16_t *)(p))
-#define	atomic_load_32(p)	(*(volatile uint32_t *)(p))
-#ifdef _LP64
-#define	atomic_load_64(p)	(*(volatile uint64_t *)(p))
-#endif
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#define	__atomic_load_char(p)	(*(volatile u_char *)(p))
+#define	__atomic_load_short(p)	(*(volatile u_short *)(p))
+#define	__atomic_load_int(p)	(*(volatile u_int *)(p))
+#define	__atomic_load_long(p)	(*(volatile u_long *)(p))
+#define	__atomic_load_8(p)	(*(volatile uint8_t *)(p))
+#define	__atomic_load_16(p)	(*(volatile uint16_t *)(p))
+#define	__atomic_load_32(p)	(*(volatile uint32_t *)(p))
+#define	__atomic_load_64(p)	(*(volatile uint64_t *)(p))
 
-#define	atomic_store_char(p, v)			\
+#define	__atomic_store_char(p, v)		\
 	(*(volatile u_char *)(p) = (u_char)(v))
-#define	atomic_store_short(p, v)		\
+#define	__atomic_store_short(p, v)		\
 	(*(volatile u_short *)(p) = (u_short)(v))
-#define	atomic_store_int(p, v)			\
+#define	__atomic_store_int(p, v)		\
 	(*(volatile u_int *)(p) = (u_int)(v))
-#define	atomic_store_long(p, v)			\
+#define	__atomic_store_long(p, v)		\
 	(*(volatile u_long *)(p) = (u_long)(v))
-#define	atomic_store_ptr(p, v)			\
-	(*(volatile __typeof(*p) *)(p) = (v))
-#define	atomic_store_8(p, v)			\
+#define	__atomic_store_8(p, v)			\
 	(*(volatile uint8_t *)(p) = (uint8_t)(v))
-#define	atomic_store_16(p, v)			\
+#define	__atomic_store_16(p, v)			\
 	(*(volatile uint16_t *)(p) = (uint16_t)(v))
-#define	atomic_store_32(p, v)			\
+#define	__atomic_store_32(p, v)			\
 	(*(volatile uint32_t *)(p) = (uint32_t)(v))
-#ifdef _LP64
-#define	atomic_store_64(p, v)			\
+#define	__atomic_store_64(p, v)			\
 	(*(volatile uint64_t *)(p) = (uint64_t)(v))
+
+/*
+ * When _Generic is available, try to provide some type checking.
+ */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+    __has_extension(c_generic_selections)
+#define	__atomic_load_generic(p, t, ut, n)				\
+	_Generic(*(p),							\
+	    t: __atomic_load_ ## n (p), ut: __atomic_load_ ## n (p))
+#define	atomic_load_char(p)	__atomic_load_generic(p, char, u_char, char)
+#define	atomic_load_short(p)	__atomic_load_generic(p, short, u_short, short)
+#define	atomic_load_int(p)	__atomic_load_generic(p, int, u_int, int)
+#define	atomic_load_long(p)	__atomic_load_generic(p, long, u_long, long)
+#define	atomic_load_8(p)	__atomic_load_generic(p, int8_t, uint8_t, 8)
+#define	atomic_load_16(p)	__atomic_load_generic(p, int16_t, uint16_t, 16)
+#define	atomic_load_32(p)	__atomic_load_generic(p, int32_t, uint32_t, 32)
+#ifdef __LP64__
+#define	atomic_load_64(p)	__atomic_load_generic(p, int64_t, uint64_t, 64)
+#endif
+#define	__atomic_store_generic(p, v, t, ut, n)				\
+	_Generic(*(p),							\
+	    t: __atomic_store_ ## n (p, v), ut: __atomic_store_ ## n (p, v))
+#define	atomic_store_char(p, v)						\
+	__atomic_store_generic(p, v, char, u_char, char)
+#define	atomic_store_short(p, v)					\
+	__atomic_store_generic(p, v, short, u_short, short)
+#define	atomic_store_int(p, v)						\
+	__atomic_store_generic(p, v, int, u_int, int)
+#define	atomic_store_long(p, v)						\
+	__atomic_store_generic(p, v, long, u_long, long)
+#define	atomic_store_8(p, v)						\
+	__atomic_store_generic(p, v, int8_t, uint8_t, 8)
+#define	atomic_store_16(p, v)						\
+	__atomic_store_generic(p, v, int16_t, uint16_t, 16)
+#define	atomic_store_32(p, v)						\
+	__atomic_store_generic(p, v, int32_t, uint32_t, 32)
+#define	atomic_store_64(p, v)						\
+	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
+#else /* !C11 */
+#define	atomic_load_char(p)	__atomic_load_char(p)
+#define	atomic_load_short(p)	__atomic_load_short(p)
+#define	atomic_load_int(p)	__atomic_load_int(p)
+#define	atomic_load_long(p)	__atomic_load_long(p)
+#define	atomic_load_8(p)	__atomic_load_8(p)
+#define	atomic_load_16(p)	__atomic_load_16(p)
+#define	atomic_load_32(p)	__atomic_load_32(p)
+#ifdef __LP64__
+#define	atomic_load_64(p)	__atomic_load_64(p)
 #endif
+#define	atomic_store_char(p, v)		__atomic_store_char(p, v)
+#define	atomic_store_short(p, v)	__atomic_store_short(p, v)
+#define	atomic_store_int(p, v)		__atomic_store_int(p, v)
+#define	atomic_store_long(p, v)		__atomic_store_long(p, v)
+#define	atomic_store_8(p, v)		__atomic_store_8(p, v)
+#define	atomic_store_16(p, v)		__atomic_store_16(p, v)
+#define	atomic_store_32(p, v)		__atomic_store_32(p, v)
+#ifdef __LP64__
+#define	atomic_store_64(p, v)		__atomic_store_64(p, v)
+#endif
+#endif /* C11 */
+
+#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
+#define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))
 
 /*
  * Currently all architectures provide acquire and release fences on their own,
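A quick illustration (not part of the patch) of what the _Generic branch
buys. The old casting macros accepted a pointer of any width and quietly
cast it; with the type-checked versions, the pointed-to type has to match
one of the listed associations or the build fails. The names below
(count, mask, example) are made up for the sketch, and it assumes the
header is pulled in through machine/atomic.h as usual:

#include <sys/types.h>
#include <machine/atomic.h>

/* Illustration only; these names are not part of the patch. */
static u_int count;
static u_long mask;

void
example(void)
{
	/* Compiles: *(&count) has type u_int, which is listed. */
	atomic_store_int(&count, 1);

	/*
	 * atomic_store_int(&mask, 1) is now rejected at compile time:
	 * u_long matches no association in __atomic_store_generic(),
	 * whereas the old macro cast the pointer and silently stored
	 * only 32 bits of the u_long.
	 */
}

atomic_load_ptr() and atomic_store_ptr() are defined outside the #if and
keep their __typeof() form, so they check the pointee type either way.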