D21681.diff
Index: sys/mips/include/atomic.h
===================================================================
--- sys/mips/include/atomic.h
+++ sys/mips/include/atomic.h
@@ -36,6 +36,8 @@
#error this file needs sys/cdefs.h as a prerequisite
#endif
+#include <machine/endian.h>
+
#include <sys/atomic_common.h>
/*
@@ -66,20 +68,221 @@
#define wmb() mips_sync()
#define rmb() mips_sync()
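+
+/*
+ * MIPS ll/sc operate only on aligned 32-bit words, so the 8- and
+ * 16-bit atomics below are emulated: each performs a load-linked/
+ * store-conditional sequence on the word containing the target and
+ * uses byte-order-dependent shifts to isolate the sub-word value.
+ */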
+#define _ATOMIC_WORD_ALIGNED(p) ((uintptr_t)(p) - ((uintptr_t)(p) % 4))
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _ATOMIC_BYTE_SHIFT(p) \
+ ((3 - ((uintptr_t)(p) % 4)) * 8)
+
+#define _ATOMIC_HWORD_SHIFT(p) \
+ ((2 - ((uintptr_t)(p) % 4)) * 8)
+#else
+#define _ATOMIC_BYTE_SHIFT(p) \
+ (((uintptr_t)(p) % 4) * 8)
+
+#define _ATOMIC_HWORD_SHIFT(p) \
+ (((uintptr_t)(p) % 4) * 8)
+#endif
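+
+/*
+ * Example: when p % 4 == 0 the addressed byte is the most significant
+ * byte of its word on a big-endian CPU, so _ATOMIC_BYTE_SHIFT(p) is
+ * 24; on a little-endian CPU it is the least significant byte and the
+ * shift is 0.
+ */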
+
/*
* Various simple arithmetic on memory which is atomic in the presence
* of interrupts and SMP safe.
*/
-void atomic_set_8(__volatile uint8_t *, uint8_t);
-void atomic_clear_8(__volatile uint8_t *, uint8_t);
-void atomic_add_8(__volatile uint8_t *, uint8_t);
-void atomic_subtract_8(__volatile uint8_t *, uint8_t);
+static __inline void
+atomic_set_8(__volatile uint8_t *p, uint8_t v)
+{
+ uint32_t *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ tempw |= (uint32_t)v << _ATOMIC_BYTE_SHIFT(p);
-void atomic_set_16(__volatile uint16_t *, uint16_t);
-void atomic_clear_16(__volatile uint16_t *, uint16_t);
-void atomic_add_16(__volatile uint16_t *, uint16_t);
-void atomic_subtract_16(__volatile uint16_t *, uint16_t);
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_clear_8(__volatile uint8_t *p, uint8_t v)
+{
+ uint32_t *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ tempw &= ~((uint32_t)v << _ATOMIC_BYTE_SHIFT(p));
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_add_8(__volatile uint8_t *p, uint8_t v)
+{
+ uint32_t extracted, *paligned, shiftval, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ /* Shift v up to the top for overflow detection */
+ shiftval = (uint32_t)v << 24;
+
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
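+ /*
+ * Do the addition with the target byte moved to the top of the
+ * word, so any carry falls off bit 31 instead of spilling into
+ * the adjacent byte. Bits below the target byte survive the
+ * round trip unchanged, so merging them back over their
+ * identical copies in tempw is harmless.
+ */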
+ extracted = (tempw << (24 - _ATOMIC_BYTE_SHIFT(p)));
+ extracted += shiftval;
+ extracted >>= (24 - _ATOMIC_BYTE_SHIFT(p));
+ tempw = (tempw & ~(0xffU << _ATOMIC_BYTE_SHIFT(p))) |
+ extracted;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_subtract_8(__volatile uint8_t *p, uint8_t v)
+{
+ uint32_t extracted, *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
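+ /*
+ * The result must be masked to the byte before it is merged
+ * back: a borrow out of the subtraction would otherwise leak
+ * into the bytes above the target.
+ */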
+ extracted = (tempw >> _ATOMIC_BYTE_SHIFT(p));
+ extracted -= v;
+ extracted = (extracted & 0xffU) << _ATOMIC_BYTE_SHIFT(p);
+ tempw = (tempw & ~(0xffU << _ATOMIC_BYTE_SHIFT(p))) |
+ extracted;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_set_16(__volatile uint16_t *p, uint16_t v)
+{
+ uint32_t *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ tempw |= (uint32_t)v << _ATOMIC_HWORD_SHIFT(p);
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_clear_16(__volatile uint16_t *p, uint16_t v)
+{
+ uint32_t *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ tempw &= ~((uint32_t)v << _ATOMIC_HWORD_SHIFT(p));
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_add_16(__volatile uint16_t *p, uint16_t v)
+{
+ uint32_t extracted, *paligned, shiftval, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ /* Shift v up to the top for overflow detection */
+ shiftval = (uint32_t)v << 16;
+
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
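+ /*
+ * Same trick as atomic_add_8: add with the target halfword at
+ * the top of the word so a carry falls off bit 31 rather than
+ * spilling into the neighbouring halfword.
+ */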
+ extracted = (tempw << (16 - _ATOMIC_HWORD_SHIFT(p)));
+ extracted += shiftval;
+ extracted >>= (16 - _ATOMIC_HWORD_SHIFT(p));
+ tempw = (tempw & ~(0xffffU << _ATOMIC_HWORD_SHIFT(p))) |
+ extracted;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
+
+static __inline void
+atomic_subtract_16(__volatile uint16_t *p, uint16_t v)
+{
+ uint32_t extracted, *paligned, tempw;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
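+ /*
+ * As in atomic_subtract_8, mask the result to the halfword so a
+ * borrow cannot leak into the neighbouring halfword.
+ */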
+ extracted = (tempw >> _ATOMIC_HWORD_SHIFT(p));
+ extracted -= v;
+ extracted = (extracted & 0xffffU) << _ATOMIC_HWORD_SHIFT(p);
+ tempw = (tempw & ~(0xffffU << _ATOMIC_HWORD_SHIFT(p))) |
+ extracted;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+}
static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
@@ -348,6 +551,68 @@
* two values are equal, update the value of *p with newval. Returns
* zero if the compare failed, nonzero otherwise.
*/
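+/*
+ * For example, a caller can set a bit in a shared byte with the usual
+ * fcmpset retry loop (illustrative sketch; FLAG is a stand-in mask):
+ *
+ *	uint8_t old = *flags;
+ *	while (!atomic_fcmpset_8(flags, &old, old | FLAG))
+ *		;
+ */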
+static __inline int
+atomic_cmpset_8(__volatile uint8_t *p, uint8_t cmpval, uint8_t newval)
+{
+ uint32_t *paligned, tempw, store;
+ int ret;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ store = (tempw >> _ATOMIC_BYTE_SHIFT(p)) & 0xff;
+ if (store == cmpval) {
+ tempw = (tempw & ~(0xffU << _ATOMIC_BYTE_SHIFT(p))) |
+ ((uint32_t)newval << _ATOMIC_BYTE_SHIFT(p));
+ ret = 1;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+ } else {
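+ /*
+ * The comparison failed, so the ll reservation is simply
+ * abandoned; no sc is needed to release it.
+ */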
+ ret = 0;
+ }
+
+ return (ret);
+}
+
+static __inline int
+atomic_cmpset_16(__volatile uint16_t *p, uint16_t cmpval, uint16_t newval)
+{
+ uint32_t *paligned, tempw, store;
+ int ret;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ store = (tempw >> _ATOMIC_HWORD_SHIFT(p)) & 0xffff;
+ if (store == cmpval) {
+ tempw = (tempw & ~(0xffffU << _ATOMIC_HWORD_SHIFT(p))) |
+ ((uint32_t)newval << _ATOMIC_HWORD_SHIFT(p));
+ ret = 1;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+ } else {
+ ret = 0;
+ }
+
+ return (ret);
+}
+
static __inline int
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
@@ -370,28 +635,71 @@
return ret;
}
-/*
- * Atomically compare the value stored at *p with cmpval and if the
- * two values are equal, update the value of *p with newval. Returns
- * zero if the compare failed, nonzero otherwise.
- */
static __inline int
-atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+atomic_fcmpset_8(__volatile uint8_t *p, uint8_t *cmpval, uint8_t newval)
{
- int retval;
+ uint32_t *paligned, tempw, store;
+ int ret;
- retval = atomic_cmpset_32(p, cmpval, newval);
- mips_sync();
- return (retval);
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ store = (tempw >> _ATOMIC_BYTE_SHIFT(p)) & 0xff;
+ if (store == *cmpval) {
+ tempw = (tempw & ~(0xffU << _ATOMIC_BYTE_SHIFT(p))) |
+ ((uint32_t)newval << _ATOMIC_BYTE_SHIFT(p));
+ ret = 1;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+ } else {
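+ /* Hand the observed value back to the caller. */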
+ *cmpval = store;
+ ret = 0;
+ }
+
+ return (ret);
}
static __inline int
-atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+atomic_fcmpset_16(__volatile uint16_t *p, uint16_t *cmpval, uint16_t newval)
{
- mips_sync();
- return (atomic_cmpset_32(p, cmpval, newval));
+ uint32_t *paligned, tempw, store;
+ int ret;
+
+ paligned = (uint32_t *)_ATOMIC_WORD_ALIGNED(p);
+ __asm __volatile ("1:\tll %0, %1\n\t"
+ : "=&r" (tempw)
+ : "m" (*paligned)
+ );
+
+ store = (tempw >> _ATOMIC_HWORD_SHIFT(p)) & 0xffff;
+ if (store == *cmpval) {
+ tempw = (tempw & ~(0xffffU << _ATOMIC_HWORD_SHIFT(p))) |
+ ((uint32_t)newval << _ATOMIC_HWORD_SHIFT(p));
+ ret = 1;
+
+ __asm __volatile (
+ "sc %1, %0\n\t" /* Attempt to store */
+ "beqz %1, 1b\n\t" /* Spin if failed */
+ : "=m" (*paligned)
+ : "r" (tempw)
+ : "memory");
+ } else {
+ *cmpval = store;
+ ret = 0;
+ }
+
+ return (ret);
}
+
static __inline int
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
@@ -415,23 +723,58 @@
return ret;
}
-static __inline int
-atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
-{
- int retval;
-
- retval = atomic_fcmpset_32(p, cmpval, newval);
- mips_sync();
- return (retval);
+#define ATOMIC_CMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_cmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_cmpset_##WIDTH(p, cmpval, newval); \
+ mips_sync(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_cmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ mips_sync(); \
+ return (atomic_cmpset_##WIDTH(p, cmpval, newval)); \
}
-static __inline int
-atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
-{
- mips_sync();
- return (atomic_fcmpset_32(p, cmpval, newval));
+#define ATOMIC_FCMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_fcmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_fcmpset_##WIDTH(p, cmpval, newval); \
+ mips_sync(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_fcmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ mips_sync(); \
+ return (atomic_fcmpset_##WIDTH(p, cmpval, newval)); \
}
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+ATOMIC_CMPSET_ACQ_REL(8);
+ATOMIC_CMPSET_ACQ_REL(16);
+ATOMIC_CMPSET_ACQ_REL(32);
+ATOMIC_FCMPSET_ACQ_REL(8);
+ATOMIC_FCMPSET_ACQ_REL(16);
+ATOMIC_FCMPSET_ACQ_REL(32);
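+
+/*
+ * Each expansion above emits a pair of wrappers, e.g.
+ * atomic_cmpset_acq_8() and atomic_cmpset_rel_8(), that issue the
+ * required memory barrier after (acq) or before (rel) the plain
+ * operation.
+ */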
+
/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
@@ -480,28 +823,6 @@
return ret;
}
-/*
- * Atomically compare the value stored at *p with cmpval and if the
- * two values are equal, update the value of *p with newval. Returns
- * zero if the compare failed, nonzero otherwise.
- */
-static __inline int
-atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
- int retval;
-
- retval = atomic_cmpset_64(p, cmpval, newval);
- mips_sync();
- return (retval);
-}
-
-static __inline int
-atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
- mips_sync();
- return (atomic_cmpset_64(p, cmpval, newval));
-}
-
static __inline int
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
@@ -526,22 +847,13 @@
return ret;
}
-static __inline int
-atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
- int retval;
-
- retval = atomic_fcmpset_64(p, cmpval, newval);
- mips_sync();
- return (retval);
-}
-
-static __inline int
-atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
- mips_sync();
- return (atomic_fcmpset_64(p, cmpval, newval));
-}
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+ATOMIC_CMPSET_ACQ_REL(64);
+ATOMIC_FCMPSET_ACQ_REL(64);
/*
* Atomically add the value of v to the integer pointed to by p and return
@@ -564,6 +876,11 @@
}
#endif
+#undef ATOMIC_CMPSET_ACQ_REL
+#undef ATOMIC_FCMPSET_ACQ_REL
+#undef _ATOMIC_WORD_ALIGNED
+#undef _ATOMIC_BYTE_SHIFT
+#undef _ATOMIC_HWORD_SHIFT
+
static __inline void
atomic_thread_fence_acq(void)
{
@@ -605,6 +922,12 @@
#define atomic_subtract_char atomic_subtract_8
#define atomic_subtract_acq_char atomic_subtract_acq_8
#define atomic_subtract_rel_char atomic_subtract_rel_8
+#define atomic_cmpset_char atomic_cmpset_8
+#define atomic_cmpset_acq_char atomic_cmpset_acq_8
+#define atomic_cmpset_rel_char atomic_cmpset_rel_8
+#define atomic_fcmpset_char atomic_fcmpset_8
+#define atomic_fcmpset_acq_char atomic_fcmpset_acq_8
+#define atomic_fcmpset_rel_char atomic_fcmpset_rel_8
/* Operations on shorts. */
#define atomic_set_short atomic_set_16
@@ -619,6 +942,12 @@
#define atomic_subtract_short atomic_subtract_16
#define atomic_subtract_acq_short atomic_subtract_acq_16
#define atomic_subtract_rel_short atomic_subtract_rel_16
+#define atomic_cmpset_short atomic_cmpset_16
+#define atomic_cmpset_acq_short atomic_cmpset_acq_16
+#define atomic_cmpset_rel_short atomic_cmpset_rel_16
+#define atomic_fcmpset_short atomic_fcmpset_16
+#define atomic_fcmpset_acq_short atomic_fcmpset_acq_16
+#define atomic_fcmpset_rel_short atomic_fcmpset_rel_16
/* Operations on ints. */
#define atomic_set_int atomic_set_32
