Changeset View
Changeset View
Standalone View
Standalone View
sys/sys/atomic_common.h
Show All 30 Lines | |||||
*/ | */ | ||||
#ifndef _SYS_ATOMIC_COMMON_H_
#define	_SYS_ATOMIC_COMMON_H_

/* This header supplements machine/atomic.h; it is not standalone. */
#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

#include <sys/cdefs.h>
#include <sys/types.h>

/*
 * Relaxed (unordered) loads: plain volatile reads of each supported
 * access width.  No ordering beyond what volatile itself provides.
 */
#define	__atomic_load_char_relaxed(p)	(*(volatile u_char *)(p))
#define	__atomic_load_short_relaxed(p)	(*(volatile u_short *)(p))
#define	__atomic_load_int_relaxed(p)	(*(volatile u_int *)(p))
#define	__atomic_load_long_relaxed(p)	(*(volatile u_long *)(p))
#define	__atomic_load_8_relaxed(p)	(*(volatile uint8_t *)(p))
#define	__atomic_load_16_relaxed(p)	(*(volatile uint16_t *)(p))
#define	__atomic_load_32_relaxed(p)	(*(volatile uint32_t *)(p))
#define	__atomic_load_64_relaxed(p)	(*(volatile uint64_t *)(p))
/*
 * Relaxed (unordered) stores: plain volatile writes of each supported
 * access width.
 */
#define	__atomic_store_char_relaxed(p, v)				\
	(*(volatile u_char *)(p) = (u_char)(v))
#define	__atomic_store_short_relaxed(p, v)				\
	(*(volatile u_short *)(p) = (u_short)(v))
#define	__atomic_store_int_relaxed(p, v)				\
	(*(volatile u_int *)(p) = (u_int)(v))
#define	__atomic_store_long_relaxed(p, v)				\
	(*(volatile u_long *)(p) = (u_long)(v))
#define	__atomic_store_8_relaxed(p, v)					\
	(*(volatile uint8_t *)(p) = (uint8_t)(v))
#define	__atomic_store_16_relaxed(p, v)					\
	(*(volatile uint16_t *)(p) = (uint16_t)(v))
#define	__atomic_store_32_relaxed(p, v)					\
	(*(volatile uint32_t *)(p) = (uint32_t)(v))
#define	__atomic_store_64_relaxed(p, v)					\
	(*(volatile uint64_t *)(p) = (uint64_t)(v))
/*
 * When _Generic is available, try to provide some type checking: the
 * controlling expression *(p) must be exactly the signed (t) or the
 * unsigned (ut) type of the access width, otherwise the selection has
 * no matching association and fails to compile.
 */
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) ||	\
    __has_extension(c_generic_selections)
#define	__atomic_load_generic(p, t, ut, n)				\
	_Generic(*(p),							\
	    t: __atomic_load_ ## n ## _relaxed(p),			\
	    ut: __atomic_load_ ## n ## _relaxed(p))
#define	__atomic_store_generic(p, v, t, ut, n)				\
	_Generic(*(p),							\
	    t: __atomic_store_ ## n ## _relaxed(p, v),			\
	    ut: __atomic_store_ ## n ## _relaxed(p, v))
#else
/* No _Generic: fall back to the unchecked relaxed accessors. */
#define	__atomic_load_generic(p, t, ut, n)				\
	__atomic_load_ ## n ## _relaxed(p)
#define	__atomic_store_generic(p, v, t, ut, n)				\
	__atomic_store_ ## n ## _relaxed(p, v)
#endif
/* Type-checked relaxed loads, built on __atomic_load_generic. */
#define	atomic_load_char(p)	__atomic_load_generic(p, char, u_char, char)
#define	atomic_load_short(p)	__atomic_load_generic(p, short, u_short, short)
#define	atomic_load_int(p)	__atomic_load_generic(p, int, u_int, int)
#define	atomic_load_long(p)	__atomic_load_generic(p, long, u_long, long)
#define	atomic_load_8(p)	__atomic_load_generic(p, int8_t, uint8_t, 8)
#define	atomic_load_16(p)	__atomic_load_generic(p, int16_t, uint16_t, 16)
#define	atomic_load_32(p)	__atomic_load_generic(p, int32_t, uint32_t, 32)
#ifdef __LP64__
/* The 64-bit accessor is only provided on LP64 platforms. */
#define	atomic_load_64(p)	__atomic_load_generic(p, int64_t, uint64_t, 64)
#endif
/* Type-checked relaxed stores, built on __atomic_store_generic. */
#define	atomic_store_char(p, v)						\
	__atomic_store_generic(p, v, char, u_char, char)
#define	atomic_store_short(p, v)					\
	__atomic_store_generic(p, v, short, u_short, short)
#define	atomic_store_int(p, v)						\
	__atomic_store_generic(p, v, int, u_int, int)
#define	atomic_store_long(p, v)						\
	__atomic_store_generic(p, v, long, u_long, long)
#define	atomic_store_8(p, v)						\
	__atomic_store_generic(p, v, int8_t, uint8_t, 8)
jrtc27: You've lost the LP64 guard on just this one.
#define	atomic_store_16(p, v)						\
	__atomic_store_generic(p, v, int16_t, uint16_t, 16)
#define	atomic_store_32(p, v)						\
	__atomic_store_generic(p, v, int32_t, uint32_t, 32)
Done Inline Actions
jrtc27: Why not just do:
```
#define __atomic_load_generic(p, t, ut, n) __atomic_load_ ## n (p)
#define __atomic_store_generic(p, v, t, ut, n) __atomic_load_ ## n (p)
```
in the !C11 case and avoid duplicating all the per-type defines?

Done Inline Actions
jrtc27: Uh,
```
#define __atomic_load_generic(p, t, ut, n) __atomic_load_ ## n (p)
#define __atomic_store_generic(p, v, t, ut, n) __atomic_store_ ## n (p, v)
```
of course...
#ifdef __LP64__
/* The 64-bit accessor is only provided on LP64 platforms. */
#define	atomic_store_64(p, v)						\
	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
#endif

/* Pointer-width accessors; __typeof preserves the pointed-to type. */
#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
#define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))
/*
 * Currently all architectures provide acquire and release fences on their own,
 * but they don't provide consume.  Kludge below allows relevant code to stop
 * openly resorting to the stronger acquire fence, to be sorted out.
 */
#define	atomic_load_consume_ptr(p)	\
    ((__typeof(*p)) atomic_load_acq_ptr((uintptr_t *)p))

/* Compiler barrier only; no hardware fence is emitted. */
#define	atomic_interrupt_fence()	__compiler_membar()

#endif /* !_SYS_ATOMIC_COMMON_H_ */
jrtc27: You've lost the LP64 guard on just this one.