sys/mips/include/atomic.h
/* ... 445 earlier lines not shown; the excerpt begins inside an __asm __volatile() block ... */
		"addu %2, %3, %0\n\t"		/* calculate new value */
		"sc %2, %1\n\t"			/* attempt to store */
		"beqz %2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));

	return (value);
}

#if (defined(__mips_n64) || defined(__mips_n32))
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	/* ... 12 lines not shown (start of the __asm __volatile block) ... */
"3:\n" | "3:\n" | ||||
: "=&r" (ret), "=m" (*p) | : "=&r" (ret), "=m" (*p) | ||||
: "r" (cmpval), "r" (newval), "m" (*p) | : "r" (cmpval), "r" (newval), "m" (*p) | ||||
: "memory"); | : "memory"); | ||||
return ret; | return ret; | ||||
} | } | ||||
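
/*
 * Illustrative sketch, not part of this change: atomic_cmpset_64() is the
 * usual building block for 64-bit read-modify-write loops.  The caller
 * re-reads the location and retries until the compare-and-set succeeds.
 * The helper name and the flag argument below are hypothetical.
 */
static __inline void
example_set_flag_64(__volatile uint64_t *p, uint64_t flag)
{
	uint64_t old;

	do {
		old = *p;			/* snapshot current value */
	} while (atomic_cmpset_64(p, old, old | flag) == 0);
}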

static __inline uint32_t
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %1\n\t"		/* load old value */
		"bne %0, %4, 2f\n\t"		/* compare */
		"move %0, %3\n\t"		/* value to store */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* if it failed, spin */
		"j 3f\n\t"
		"2:\n\t"
		"sd %0, %2\n\t"			/* save old value */
		"li %0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "+m" (*p), "=m" (*cmpval)
		: "r" (newval), "r" (*cmpval)
		: "memory");

	return ret;
}
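
/*
 * Illustrative sketch, not part of this change: unlike atomic_cmpset_64(),
 * atomic_fcmpset_64() writes the value it observed back into *cmpval when
 * the comparison fails, so a retry loop does not have to reload *p by hand.
 * The helper name and the limit argument below are hypothetical.
 */
static __inline uint64_t
example_add_clamped_64(__volatile uint64_t *p, uint64_t limit)
{
	uint64_t old, val;

	old = *p;
	do {
		val = (old < limit) ? old + 1 : old;	/* saturating increment */
	} while (!atomic_fcmpset_64(p, &old, val));
	return (old);
}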

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t value, temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %1\n\t"		/* load old value */
		"daddu %2, %3, %0\n\t"		/* calculate new value */
		"scd %2, %1\n\t"		/* attempt to store */
		"beqz %2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));

	return (value);
}
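
/*
 * Illustrative sketch, not part of this change: atomic_fetchadd_64() returns
 * the value *p held before the addition, which makes it convenient for
 * handing out monotonically increasing 64-bit identifiers.  The helper and
 * variable names below are hypothetical.
 */
static __inline uint64_t
example_alloc_id_64(__volatile uint64_t *next_id)
{

	/* Each caller gets a distinct id; *next_id advances by one. */
	return (atomic_fetchadd_64(next_id, 1));
}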
#else

#ifdef _KERNEL
#include <machine/cpufunc.h>

#ifdef SMP
#error "32-bit mips SMP not supported"
#endif

/*
 * 32-bit mips CPUs provide no 64-bit atomic operations in hardware.  In a
 * uniprocessor (UP) kernel environment an operation can be made atomic by
 * disabling interrupts, performing the operation, and then restoring the
 * previous interrupt mask.
 */
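
/*
 * Illustrative sketch, not part of this change, of the pattern described
 * above: on a UP kernel any 64-bit primitive can be built by bracketing the
 * plain C operation with intr_disable()/intr_enable().  The helper name is
 * hypothetical; the real implementations follow below.
 */
static __inline uint64_t
example_readandclear_64(__volatile uint64_t *p)
{
	uint64_t tmp;
	int s;

	s = intr_disable();		/* block interrupts on this CPU */
	tmp = *p;
	*p = 0;
	if (s)
		intr_enable();		/* restore only if they were enabled */
	return (tmp);
}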

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	int s, rv;

	s = intr_disable();
	if (*p == cmpval) {
		*p = newval;
		rv = 1;
	} else
		rv = 0;
	if (s)
		intr_enable();
	return (rv);
}

static __inline int
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	int s, rv;

	s = intr_disable();
	if (*p == *cmpval) {
		*p = newval;
		rv = 1;
	} else {
		*cmpval = *p;
		rv = 0;
	}
	if (s)
		intr_enable();
	return (rv);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t tmp;
	int s;

	s = intr_disable();
	tmp = *p;
	*p += v;
	if (s)
		intr_enable();
	return (tmp);
}
#endif /* _KERNEL */
#endif

#if (defined(__mips_n64) || defined(__mips_n32)) || defined(_KERNEL)
static __inline int
atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_cmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline int
atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	mips_sync();
	return (atomic_cmpset_64(p, cmpval, newval));
}

static __inline int
atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_fcmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline int
atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	mips_sync();
	return (atomic_fcmpset_64(p, cmpval, newval));
}
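
/*
 * Illustrative sketch, not part of this change: the _acq/_rel variants pair
 * naturally for handing a 64-bit slot between threads.  A successful acquire
 * claim orders later reads after the claim; the release hand-back orders
 * earlier writes before the slot becomes free again.  The helper names and
 * the 0-means-free convention are hypothetical.
 */
static __inline int
example_claim_slot(__volatile uint64_t *slot, uint64_t owner)
{

	return (atomic_cmpset_acq_64(slot, 0, owner));
}

static __inline void
example_release_slot(__volatile uint64_t *slot, uint64_t owner)
{

	(void)atomic_cmpset_rel_64(slot, owner, 0);
}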
#endif

static __inline void
atomic_thread_fence_acq(void)
{
	mips_sync();
}
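
/*
 * Illustrative sketch, not part of this change: a stand-alone acquire fence
 * can order a plain flag load before the reads that depend on it.  The
 * structure and field names below are hypothetical.
 */
struct example_msg {
	volatile uint32_t ready;	/* set to 1 by the producer last */
	uint64_t payload;
};

static __inline int
example_try_receive(struct example_msg *m, uint64_t *out)
{

	if (m->ready == 0)
		return (0);
	atomic_thread_fence_acq();	/* payload reads cannot pass the flag read */
	*out = m->payload;
	return (1);
}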

static __inline void
/* ... 199 lines not shown; the excerpt resumes inside atomic_swap_32(volatile uint32_t *ptr, const uint32_t value) ... */
	retval = *ptr;
	while (!atomic_fcmpset_32(ptr, &retval, value))
		;
	return (retval);
}
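
/*
 * Illustrative sketch, not part of this change: atomic_swap_32() returns the
 * old value while installing a new one, so swapping in 0 drains a word of
 * pending bits in a single step.  The helper name is hypothetical.
 */
static __inline uint32_t
example_drain_pending(volatile uint32_t *pending)
{

	/* Grab all currently pending bits and leave the word empty. */
	return (atomic_swap_32(pending, 0));
}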

#if (defined(__mips_n64) || defined(__mips_n32)) || defined(_KERNEL)
static __inline uint64_t
atomic_swap_64(volatile uint64_t *ptr, const uint64_t value)
{
	uint64_t retval;

	retval = *ptr;
	while (!atomic_fcmpset_64(ptr, &retval, value))
	/* ... remaining 35 lines not shown ... */