Index: sys/mips/include/atomic.h
===================================================================
--- sys/mips/include/atomic.h
+++ sys/mips/include/atomic.h
@@ -451,7 +451,7 @@
 	return (value);
 }
 
-#if defined(__mips_n64) || defined(__mips_n32)
+#if (defined(__mips_n64) || defined(__mips_n32))
 /*
  * Atomically compare the value stored at *p with cmpval and if the
  * two values are equal, update the value of *p with newval. Returns
@@ -480,32 +480,10 @@
 	return ret;
 }
 
-/*
- * Atomically compare the value stored at *p with cmpval and if the
- * two values are equal, update the value of *p with newval. Returns
- * zero if the compare failed, nonzero otherwise.
- */
-static __inline int
-atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
-	int retval;
-
-	retval = atomic_cmpset_64(p, cmpval, newval);
-	mips_sync();
-	return (retval);
-}
-
-static __inline int
-atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
-	mips_sync();
-	return (atomic_cmpset_64(p, cmpval, newval));
-}
-
-static __inline int
+static __inline uint32_t
 atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
 {
-	int ret;
+	uint32_t ret;
 
 	__asm __volatile (
 	"1:\n\t"
@@ -526,23 +504,6 @@
 	return ret;
 }
 
-static __inline int
-atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
-	int retval;
-
-	retval = atomic_fcmpset_64(p, cmpval, newval);
-	mips_sync();
-	return (retval);
-}
-
-static __inline int
-atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
-	mips_sync();
-	return (atomic_fcmpset_64(p, cmpval, newval));
-}
-
 /*
  * Atomically add the value of v to the integer pointed to by p and return
  * the previous value of *p.
@@ -562,6 +523,111 @@
 	    : "r" (v), "m" (*p));
 	return (value);
 }
+#else
+#ifdef _KERNEL
+#include
+#ifdef SMP
+#error "32-bit mips SMP not supported"
+#endif
+/*
+ * For 32-bit mips CPUs, there are no 64-bit atomic operations in hardware.
+ * For UP, in-kernel environments, you can disable interrupts, do the op,
+ * then restore the old interrupt mask.
+ */
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
+{
+	int s, rv;
+
+	s = intr_disable();
+	if (*p == cmpval) {
+		*p = newval;
+		rv = 1;
+	} else
+		rv = 0;
+	if (s)
+		intr_enable();
+	return (rv);
+}
+
+static __inline int
+atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+	int s, rv;
+
+	s = intr_disable();
+	if (*p == *cmpval) {
+		*p = newval;
+		rv = 1;
+	} else {
+		*cmpval = *p;
+		rv = 0;
+	}
+	if (s)
+		intr_enable();
+	return (rv);
+}
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline uint64_t
+atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
+{
+	uint64_t tmp;
+	int s;
+
+	s = intr_disable();
+	tmp = *p;
+	*p += v;
+	if (s)
+		intr_enable();
+	return (tmp);
+}
+#endif /* _KERNEL */
+#endif
+
+#if (defined(__mips_n64) || defined(__mips_n32)) || defined(_KERNEL)
+static __inline int
+atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
+{
+	int retval;
+
+	retval = atomic_cmpset_64(p, cmpval, newval);
+	mips_sync();
+	return (retval);
+}
+
+static __inline int
+atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
+{
+	mips_sync();
+	return (atomic_cmpset_64(p, cmpval, newval));
+}
+
+static __inline int
+atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+	int retval;
+
+	retval = atomic_fcmpset_64(p, cmpval, newval);
+	mips_sync();
+	return (retval);
+}
+
+static __inline int
+atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+	mips_sync();
+	return (atomic_fcmpset_64(p, cmpval, newval));
+}
 #endif
 
 static __inline void
@@ -779,7 +845,7 @@
 	return (retval);
 }
 
-#if defined(__mips_n64) || defined(__mips_n32)
+#if (defined(__mips_n64) || defined(__mips_n32)) || defined(_KERNEL)
 static __inline uint64_t
 atomic_swap_64(volatile uint64_t *ptr, const uint64_t value)
 {
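
For illustration, the UP fallback added above is the classic uniprocessor trick: a read-modify-write sequence is made atomic with respect to interrupts, the only source of concurrency on a !SMP kernel, rather than with respect to other CPUs. Below is a minimal user-space sketch of that control flow; intr_stub_disable()/intr_stub_enable() are hypothetical stand-ins for the kernel's intr_disable()/intr_enable(), so the sketch shows the pattern only and provides no real atomicity.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's intr_disable()/intr_enable(). */
static int
intr_stub_disable(void)
{

	return (1);	/* pretend interrupts were previously enabled */
}

static void
intr_stub_enable(void)
{
}

/* Same shape as the atomic_cmpset_64() UP fallback in the patch. */
static int
emul_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	int s, rv;

	s = intr_stub_disable();
	if (*p == cmpval) {
		*p = newval;
		rv = 1;
	} else
		rv = 0;
	if (s)
		intr_stub_enable();
	return (rv);
}

int
main(void)
{
	volatile uint64_t v = 5;

	printf("cmpset 5->9: %d, v = %ju\n",
	    emul_cmpset_64(&v, 5, 9), (uintmax_t)v);
	printf("cmpset 5->1: %d, v = %ju\n",
	    emul_cmpset_64(&v, 5, 1), (uintmax_t)v);
	return (0);
}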
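
A second note, on the fcmpset variants the patch consolidates: unlike atomic_cmpset_64(), atomic_fcmpset_64() writes the observed value back through cmpval on failure, which lets a retry loop avoid an explicit reload. Here is a sketch of a typical caller written against the interfaces in this patch; counter_add_64() is a hypothetical helper, not part of the change.

/*
 * Hypothetical caller of atomic_fcmpset_64() from this patch: on
 * failure the function has already refreshed 'old' from *p, so the
 * loop just recomputes the new value and retries.
 */
static __inline uint64_t
counter_add_64(volatile uint64_t *p, uint64_t delta)
{
	uint64_t old, new;

	old = *p;
	do {
		new = old + delta;
	} while (atomic_fcmpset_64(p, &old, new) == 0);
	return (new);
}

The relocated _acq/_rel wrappers then layer ordering on top: atomic_fcmpset_acq_64() issues mips_sync() after the operation and atomic_fcmpset_rel_64() issues it before, so the same loop gains acquire or release semantics just by switching the call.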