diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -304,7 +304,7 @@
 #define	ATOMIC_LOAD(TYPE)	\
 static __inline u_##TYPE	\
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)	\
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)	\
 {	\
 	u_##TYPE res;	\
 	\
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -608,7 +608,7 @@
 }

 static __inline uint32_t
-atomic_load_acq_32(volatile uint32_t *p)
+atomic_load_acq_32(const volatile uint32_t *p)
 {
 	uint32_t v;

@@ -618,7 +618,7 @@
 }

 static __inline uint64_t
-atomic_load_64(volatile uint64_t *p)
+atomic_load_64(const volatile uint64_t *p)
 {
 	uint64_t ret;

@@ -637,7 +637,7 @@
 }

 static __inline uint64_t
-atomic_load_acq_64(volatile uint64_t *p)
+atomic_load_acq_64(const volatile uint64_t *p)
 {
 	uint64_t ret;

@@ -647,7 +647,7 @@
 }

 static __inline u_long
-atomic_load_acq_long(volatile u_long *p)
+atomic_load_acq_long(const volatile u_long *p)
 {
 	u_long v;

diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
--- a/sys/arm64/include/atomic.h
+++ b/sys/arm64/include/atomic.h
@@ -465,7 +465,7 @@
 #define	_ATOMIC_LOAD_ACQ_IMPL(t, w, s)	\
 static __inline uint##t##_t	\
-atomic_load_acq_##t(volatile uint##t##_t *p)	\
+atomic_load_acq_##t(const volatile uint##t##_t *p)	\
 {	\
 	uint##t##_t ret;	\
 	\
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
--- a/sys/i386/include/atomic.h
+++ b/sys/i386/include/atomic.h
@@ -249,7 +249,7 @@
 #define	ATOMIC_LOAD(TYPE)	\
 static __inline u_##TYPE	\
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)	\
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)	\
 {	\
 	u_##TYPE res;	\
 	\
@@ -302,8 +302,8 @@
 #ifdef WANT_FUNCTIONS
 int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
 int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
-uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
-uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
+uint64_t	atomic_load_acq_64_i386(const volatile uint64_t *);
+uint64_t	atomic_load_acq_64_i586(const volatile uint64_t *);
 void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
 void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
 uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
@@ -353,7 +353,7 @@
 }

 static __inline uint64_t
-atomic_load_acq_64_i386(volatile uint64_t *p)
+atomic_load_acq_64_i386(const volatile uint64_t *p)
 {
 	volatile uint32_t *q;
 	uint64_t res;

@@ -448,7 +448,7 @@
 }

 static __inline uint64_t
-atomic_load_acq_64_i586(volatile uint64_t *p)
+atomic_load_acq_64_i586(const volatile uint64_t *p)
 {
 	uint64_t res;

@@ -457,7 +457,7 @@
 	    "	movl %%ecx,%%edx ;	"
 	    "	lock; cmpxchg8b %1"
 	    : "=&A" (res),		/* 0 */
-	      "+m" (*p)			/* 1 */
+	      "m" (*p)			/* 1 */
 	    : : "memory", "cc");
 	return (res);
 }
@@ -514,7 +514,7 @@
 }

 static __inline uint64_t
-atomic_load_acq_64(volatile uint64_t *p)
+atomic_load_acq_64(const volatile uint64_t *p)
 {

 	if ((cpu_feature & CPUID_CX8) == 0)
@@ -842,7 +842,7 @@
 #define	atomic_subtract_rel_ptr(p, v) \
 	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
 #define	atomic_load_acq_ptr(p) \
-	atomic_load_acq_int((volatile u_int *)(p))
+	atomic_load_acq_int((const volatile u_int *)(p))
 #define	atomic_store_rel_ptr(p, v) \
 	atomic_store_rel_int((volatile u_int *)(p), (v))
 #define	atomic_cmpset_ptr(dst, old, new) \
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -502,7 +502,7 @@
  */
 #define	ATOMIC_STORE_LOAD(TYPE)	\
 static __inline u_##TYPE	\
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)	\
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)	\
 {	\
 	u_##TYPE v;	\
 	\
@@ -534,10 +534,10 @@
 #define	atomic_store_rel_ptr	atomic_store_rel_long
 #else
 static __inline u_long
-atomic_load_acq_long(volatile u_long *addr)
+atomic_load_acq_long(const volatile u_long *addr)
 {

-	return ((u_long)atomic_load_acq_int((volatile u_int *)addr));
+	return ((u_long)atomic_load_acq_int((const volatile u_int *)addr));
 }

 static __inline void
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -121,7 +121,7 @@
 #define	atomic_load_acq_16	atomic_load_acq_16
 static __inline uint16_t
-atomic_load_acq_16(volatile uint16_t *p)
+atomic_load_acq_16(const volatile uint16_t *p)
 {
 	uint16_t ret;

@@ -284,7 +284,7 @@
 ATOMIC_FCMPSET_ACQ_REL(32);

 static __inline uint32_t
-atomic_load_acq_32(volatile uint32_t *p)
+atomic_load_acq_32(const volatile uint32_t *p)
 {
 	uint32_t ret;

diff --git a/sys/sys/_atomic64e.h b/sys/sys/_atomic64e.h
--- a/sys/sys/_atomic64e.h
+++ b/sys/sys/_atomic64e.h
@@ -55,7 +55,7 @@
 u_int64_t	atomic_fetchadd_64(volatile u_int64_t *, u_int64_t);

-u_int64_t	atomic_load_64(volatile u_int64_t *);
+u_int64_t	atomic_load_64(const volatile u_int64_t *);
 #define	atomic_load_acq_64	atomic_load_64

 void	atomic_readandclear_64(volatile u_int64_t *);
diff --git a/sys/sys/_atomic_subword.h b/sys/sys/_atomic_subword.h
--- a/sys/sys/_atomic_subword.h
+++ b/sys/sys/_atomic_subword.h
@@ -176,7 +176,7 @@
 #ifndef atomic_load_acq_8
 static __inline uint8_t
-atomic_load_acq_8(volatile uint8_t *p)
+atomic_load_acq_8(const volatile uint8_t *p)
 {
 	int shift;
 	uint8_t ret;

@@ -189,7 +189,7 @@
 #ifndef atomic_load_acq_16
 static __inline uint16_t
-atomic_load_acq_16(volatile uint16_t *p)
+atomic_load_acq_16(const volatile uint16_t *p)
 {
 	int shift;
 	uint16_t ret;

diff --git a/sys/sys/atomic_common.h b/sys/sys/atomic_common.h
--- a/sys/sys/atomic_common.h
+++ b/sys/sys/atomic_common.h
@@ -36,18 +36,18 @@
 #include <sys/types.h>

-#define	__atomic_load_bool_relaxed(p)	(*(volatile _Bool *)(p))
+#define	__atomic_load_bool_relaxed(p)	(*(const volatile _Bool *)(p))
 #define	__atomic_store_bool_relaxed(p, v)	\
 	(*(volatile _Bool *)(p) = (_Bool)(v))

-#define	__atomic_load_char_relaxed(p)	(*(volatile u_char *)(p))
-#define	__atomic_load_short_relaxed(p)	(*(volatile u_short *)(p))
-#define	__atomic_load_int_relaxed(p)	(*(volatile u_int *)(p))
-#define	__atomic_load_long_relaxed(p)	(*(volatile u_long *)(p))
-#define	__atomic_load_8_relaxed(p)	(*(volatile uint8_t *)(p))
-#define	__atomic_load_16_relaxed(p)	(*(volatile uint16_t *)(p))
-#define	__atomic_load_32_relaxed(p)	(*(volatile uint32_t *)(p))
-#define	__atomic_load_64_relaxed(p)	(*(volatile uint64_t *)(p))
+#define	__atomic_load_char_relaxed(p)	(*(const volatile u_char *)(p))
+#define	__atomic_load_short_relaxed(p)	(*(const volatile u_short *)(p))
+#define	__atomic_load_int_relaxed(p)	(*(const volatile u_int *)(p))
+#define	__atomic_load_long_relaxed(p)	(*(const volatile u_long *)(p))
+#define	__atomic_load_8_relaxed(p)	(*(const volatile uint8_t *)(p))
+#define	__atomic_load_16_relaxed(p)	(*(const volatile uint16_t *)(p))
+#define	__atomic_load_32_relaxed(p)	(*(const volatile uint32_t *)(p))
+#define	__atomic_load_64_relaxed(p)	(*(const volatile uint64_t *)(p))

 #define	__atomic_store_char_relaxed(p, v)	\
 	(*(volatile u_char *)(p) = (u_char)(v))
@@ -124,7 +124,7 @@
 	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
 #endif

-#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
+#define	atomic_load_ptr(p)	(*(const volatile __typeof(*p) *)(p))
 #define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))

 /*
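
Illustration (not part of the patch; the structure and helper below are hypothetical). With the const-qualified prototypes, a read-only accessor can take a pointer to const and still issue an acquire load, with no cast that drops the qualifier:

#include <sys/types.h>
#include <machine/atomic.h>

/* Hypothetical shared-state structure; name and field are made up. */
struct stats {
	u_int	gen;		/* generation counter bumped by writers */
};

/*
 * Read-only accessor.  "const struct stats *" documents that *sp is
 * never written.  &sp->gen has type "const u_int *", which implicitly
 * converts to the "const volatile u_int *" parameter of
 * atomic_load_acq_int(); before this change the call required casting
 * away the const.
 */
static __inline u_int
stats_read_gen(const struct stats *sp)
{
	return (atomic_load_acq_int(&sp->gen));
}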