D21822: Provide generic sub-word atomic *cmpset
D21822.id62647.diff (14 KB)

Index: sys/mips/include/atomic.h
===================================================================
--- sys/mips/include/atomic.h
+++ sys/mips/include/atomic.h
@@ -81,6 +81,11 @@
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);
+static int atomic_cmpset_8(__volatile uint8_t *, uint8_t, uint8_t);
+static int atomic_fcmpset_8(__volatile uint8_t *, uint8_t *, uint8_t);
+static int atomic_cmpset_16(__volatile uint16_t *, uint16_t, uint16_t);
+static int atomic_fcmpset_16(__volatile uint16_t *, uint16_t *, uint16_t);
+
static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
@@ -376,23 +381,6 @@
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
-{
- int retval;
-
- retval = atomic_cmpset_32(p, cmpval, newval);
- mips_sync();
- return (retval);
-}
-
-static __inline int
-atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
-{
- mips_sync();
- return (atomic_cmpset_32(p, cmpval, newval));
-}
-
-static __inline int
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
int ret;
@@ -415,24 +403,59 @@
return ret;
}
-static __inline int
-atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
-{
- int retval;
-
- retval = atomic_fcmpset_32(p, cmpval, newval);
- mips_sync();
- return (retval);
+#define ATOMIC_CMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_cmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_cmpset_##WIDTH(p, cmpval, newval); \
+ mips_sync(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_cmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ mips_sync(); \
+ return (atomic_cmpset_##WIDTH(p, cmpval, newval)); \
}
-static __inline int
-atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
-{
- mips_sync();
- return (atomic_fcmpset_32(p, cmpval, newval));
+#define ATOMIC_FCMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_fcmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_fcmpset_##WIDTH(p, cmpval, newval); \
+ mips_sync(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_fcmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ mips_sync(); \
+ return (atomic_fcmpset_##WIDTH(p, cmpval, newval)); \
}
/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+ATOMIC_CMPSET_ACQ_REL(8);
+ATOMIC_CMPSET_ACQ_REL(16);
+ATOMIC_CMPSET_ACQ_REL(32);
+ATOMIC_FCMPSET_ACQ_REL(8);
+ATOMIC_FCMPSET_ACQ_REL(16);
+ATOMIC_FCMPSET_ACQ_REL(32);
+
+/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
*/
@@ -480,29 +503,7 @@
return ret;
}
-/*
- * Atomically compare the value stored at *p with cmpval and if the
- * two values are equal, update the value of *p with newval. Returns
- * zero if the compare failed, nonzero otherwise.
- */
static __inline int
-atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
- int retval;
-
- retval = atomic_cmpset_64(p, cmpval, newval);
- mips_sync();
- return (retval);
-}
-
-static __inline int
-atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
- mips_sync();
- return (atomic_cmpset_64(p, cmpval, newval));
-}
-
-static __inline int
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
int ret;
@@ -526,23 +527,14 @@
return ret;
}
-static __inline int
-atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
- int retval;
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+ATOMIC_CMPSET_ACQ_REL(64);
+ATOMIC_FCMPSET_ACQ_REL(64);
- retval = atomic_fcmpset_64(p, cmpval, newval);
- mips_sync();
- return (retval);
-}
-
-static __inline int
-atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
-{
- mips_sync();
- return (atomic_fcmpset_64(p, cmpval, newval));
-}
-
/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
@@ -605,6 +597,12 @@
#define atomic_subtract_char atomic_subtract_8
#define atomic_subtract_acq_char atomic_subtract_acq_8
#define atomic_subtract_rel_char atomic_subtract_rel_8
+#define atomic_cmpset_char atomic_cmpset_8
+#define atomic_cmpset_acq_char atomic_cmpset_acq_8
+#define atomic_cmpset_rel_char atomic_cmpset_rel_8
+#define atomic_fcmpset_char atomic_fcmpset_8
+#define atomic_fcmpset_acq_char atomic_fcmpset_acq_8
+#define atomic_fcmpset_rel_char atomic_fcmpset_rel_8
/* Operations on shorts. */
#define atomic_set_short atomic_set_16
@@ -619,6 +617,12 @@
#define atomic_subtract_short atomic_subtract_16
#define atomic_subtract_acq_short atomic_subtract_acq_16
#define atomic_subtract_rel_short atomic_subtract_rel_16
+#define atomic_cmpset_short atomic_cmpset_16
+#define atomic_cmpset_acq_short atomic_cmpset_acq_16
+#define atomic_cmpset_rel_short atomic_cmpset_rel_16
+#define atomic_fcmpset_short atomic_fcmpset_16
+#define atomic_fcmpset_acq_short atomic_fcmpset_acq_16
+#define atomic_fcmpset_rel_short atomic_fcmpset_rel_16
/* Operations on ints. */
#define atomic_set_int atomic_set_32
@@ -821,5 +825,7 @@
}
#endif
#define atomic_swap_ptr(ptr, value) atomic_swap_long((unsigned long *)(ptr), value)
+
+#include <sys/_atomic_subword.h>
#endif /* ! _MACHINE_ATOMIC_H_ */
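
For reference, expanding ATOMIC_CMPSET_ACQ_REL(8) above by hand gives the following pair of inline functions (a sketch of the preprocessor output, reformatted; the _16/_32/_64 instantiations differ only in type width):

static __inline int
atomic_cmpset_acq_8(__volatile uint8_t *p, uint8_t cmpval, uint8_t newval)
{
	int retval;

	retval = atomic_cmpset_8(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline int
atomic_cmpset_rel_8(__volatile uint8_t *p, uint8_t cmpval, uint8_t newval)
{
	mips_sync();
	return (atomic_cmpset_8(p, cmpval, newval));
}

As with the hand-written _32/_64 variants this change removes, the acquire form issues the barrier after the operation and the release form issues it before.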
Index: sys/sparc64/include/atomic.h
===================================================================
--- sys/sparc64/include/atomic.h
+++ sys/sparc64/include/atomic.h
@@ -48,6 +48,11 @@
#define __ASI_ATOMIC ASI_P
#endif
+static int atomic_cmpset_8(__volatile uint8_t *, uint8_t, uint8_t);
+static int atomic_fcmpset_8(__volatile uint8_t *, uint8_t *, uint8_t);
+static int atomic_cmpset_16(__volatile uint16_t *, uint16_t, uint16_t);
+static int atomic_fcmpset_16(__volatile uint16_t *, uint16_t *, uint16_t);
+
/*
* Various simple arithmetic on memory which is atomic in the presence
* of interrupts and multiple processors. See atomic(9) for details.
@@ -356,6 +361,70 @@
ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64);
+#define ATOMIC_CMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_cmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_cmpset_##WIDTH(p, cmpval, newval); \
+ mb(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_cmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t cmpval, uint##WIDTH##_t newval) \
+{ \
+ mb(); \
+ return (atomic_cmpset_##WIDTH(p, cmpval, newval)); \
+}
+
+#define ATOMIC_FCMPSET_ACQ_REL(WIDTH) \
+static __inline int \
+atomic_fcmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ int retval; \
+ \
+ retval = atomic_fcmpset_##WIDTH(p, cmpval, newval); \
+ mb(); \
+ return (retval); \
+} \
+ \
+static __inline int \
+atomic_fcmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p, \
+ uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval) \
+{ \
+ mb(); \
+ return (atomic_fcmpset_##WIDTH(p, cmpval, newval)); \
+}
+
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+ATOMIC_CMPSET_ACQ_REL(8);
+ATOMIC_CMPSET_ACQ_REL(16);
+ATOMIC_FCMPSET_ACQ_REL(8);
+ATOMIC_FCMPSET_ACQ_REL(16);
+
+#define atomic_cmpset_char atomic_cmpset_8
+#define atomic_cmpset_acq_char atomic_cmpset_acq_8
+#define atomic_cmpset_rel_char atomic_cmpset_rel_8
+#define atomic_fcmpset_char atomic_fcmpset_8
+#define atomic_fcmpset_acq_char atomic_fcmpset_acq_8
+#define atomic_fcmpset_rel_char atomic_fcmpset_rel_8
+
+#define atomic_cmpset_short atomic_cmpset_16
+#define atomic_cmpset_acq_short atomic_cmpset_acq_16
+#define atomic_cmpset_rel_short atomic_cmpset_rel_16
+#define atomic_fcmpset_short atomic_fcmpset_16
+#define atomic_fcmpset_acq_short atomic_fcmpset_acq_16
+#define atomic_fcmpset_rel_short atomic_fcmpset_rel_16
+
#define atomic_fetchadd_int atomic_add_int
#define atomic_fetchadd_32 atomic_add_32
#define atomic_fetchadd_long atomic_add_long
@@ -373,5 +440,7 @@
#undef atomic_st
#undef atomic_st_acq
#undef atomic_st_rel
+
+#include <sys/_atomic_subword.h>
#endif /* !_MACHINE_ATOMIC_H_ */
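
As a usage sketch (hypothetical caller, not part of this change), the new sub-word variants follow the usual atomic(9) contract: compare *p against cmpval, store newval only on a match, and return nonzero on success:

	static volatile uint16_t state;	/* hypothetical flag word */

	/* Move state from 0 (idle) to 1 (busy) with acquire semantics. */
	if (atomic_cmpset_acq_16(&state, 0, 1)) {
		/* the transition is ours */
	}

The sparc64 macro bodies are identical to the MIPS ones except that the barriers come from mb() rather than mips_sync().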
Index: sys/sys/_atomic_subword.h
===================================================================
--- sys/sys/_atomic_subword.h
+++ sys/sys/_atomic_subword.h
@@ -0,0 +1,181 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Kyle Evans <kevans@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _SYS__ATOMIC_SUBWORD_H_
+#define _SYS__ATOMIC_SUBWORD_H_
+
+/*
+ * This header is specifically for platforms that either have no way to do
+ * sub-word atomic operations or simply do not implement them. The emulations
+ * here are not ideal: they take a little extra effort to make sure an atomic
+ * operation fails because of the bits of the word we are trying to write,
+ * rather than because of the rest of the word.
+ */
+#ifndef _MACHINE_ATOMIC_H_
+#error do not include this header, use machine/atomic.h
+#endif
+
+#include <machine/endian.h>
+
+#ifdef _KERNEL
+#include <sys/systm.h>
+#else
+#define KASSERT(exp, msg)
+#endif
+
+#define _ATOMIC_WORD_ALIGNED(p) \
+ (uint32_t *)((uintptr_t)(p) - ((uintptr_t)(p) % 4))
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _ATOMIC_BYTE_SHIFT(p) \
+ ((3 - ((uintptr_t)(p) % 4)) * 8)
+
+#define _ATOMIC_HWORD_SHIFT(p) \
+ ((2 - ((uintptr_t)(p) % 4)) * 8)
+#else
+#define _ATOMIC_BYTE_SHIFT(p) \
+ ((((uintptr_t)(p) % 4)) * 8)
+
+#define _ATOMIC_HWORD_SHIFT(p) \
+ ((((uintptr_t)(p) % 4)) * 8)
+#endif
+
+static __inline int atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval,
+ uint32_t newval);
+
+/*
+ * Pass these bad boys a couple words and a mask of the bits you care about,
+ * they'll loop until we either succeed or fail because of those bits rather
+ * than the ones we're not masking. old and val should already be preshifted to
+ * the proper position.
+ */
+static __inline int
+_atomic_cmpset_masked_word(uint32_t *addr, uint32_t old, uint32_t val,
+ uint32_t mask)
+{
+ int ret;
+ uint32_t wcomp;
+
+ /* This should already be masked, but play it safe. */
+ wcomp = old & mask;
+
+ /*
+ * We'll attempt the cmpset on the entire word. Loop here in case the
+ * operation fails because of the other bits resident in that word,
+ * rather than the bits we're trying to operate on. Ideally we only
+ * take one trip through here. We'll have to recalculate the old value
+ * on each retry, since it's the other part of the word that changed.
+ */
+ do {
+ old = (*addr & ~mask) | wcomp;
+ ret = atomic_fcmpset_32(addr, &old, (old & ~mask) | val);
+ } while (ret == 0 && (old & mask) == wcomp);
+
+ return (ret);
+}
+
+static __inline int
+_atomic_fcmpset_masked_word(uint32_t *addr, uint32_t *old, uint32_t val,
+ uint32_t mask)
+{
+ int ret;
+ uint32_t wcomp;
+
+ wcomp = *old & mask;
+
+ /* Same deal as with _atomic_cmpset_masked_word; see above. */
+ do {
+ *old = (*addr & ~mask) | wcomp;
+ ret = atomic_fcmpset_32(addr, old, (*old & ~mask) | val);
+ } while (ret == 0 && (*old & mask) == wcomp);
+
+ return (ret);
+}
+
+static __inline int
+atomic_cmpset_8(__volatile uint8_t *addr, uint8_t old, uint8_t val)
+{
+ int shift;
+
+ shift = _ATOMIC_BYTE_SHIFT(addr);
+
+ return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
+ old << shift, val << shift, 0xff << shift));
+}
+
+static __inline int
+atomic_fcmpset_8(__volatile uint8_t *addr, uint8_t *old, uint8_t val)
+{
+ int ret, shift;
+ uint32_t wold;
+
+ shift = _ATOMIC_BYTE_SHIFT(addr);
+ wold = *old << shift;
+ ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
+ &wold, val << shift, 0xff << shift);
+ if (ret == 0)
+ *old = (wold >> shift) & 0xff;
+ return (ret);
+}
+
+static __inline int
+atomic_cmpset_16(__volatile uint16_t *addr, uint16_t old, uint16_t val)
+{
+ int shift;
+
+ KASSERT(((uintptr_t)addr % 2) == 0, ("%s: misaligned address", __func__));
+ shift = _ATOMIC_HWORD_SHIFT(addr);
+
+ return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
+ old << shift, val << shift, 0xffff << shift));
+}
+
+static __inline int
+atomic_fcmpset_16(__volatile uint16_t *addr, uint16_t *old, uint16_t val)
+{
+ int ret, shift;
+ uint32_t wold;
+
+ KASSERT(((uintptr_t)addr % 2) == 0, ("%s: misaligned address", __func__));
+ shift = _ATOMIC_HWORD_SHIFT(addr);
+ wold = *old << shift;
+ ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
+ &wold, val << shift, 0xffff << shift);
+ if (ret == 0)
+ *old = (wold >> shift) & 0xffff;
+ return (ret);
+}
+
+#undef _ATOMIC_WORD_ALIGNED
+#undef _ATOMIC_BYTE_SHIFT
+#undef _ATOMIC_HWORD_SHIFT
+#ifndef _KERNEL
+#undef KASSERT
+#endif
+
+#endif /* _SYS__ATOMIC_SUBWORD_H_ */
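
To make the masked-word loop concrete, here is a small standalone program (not part of the diff) that replays the byte case in userland. It assumes a little-endian host and uses the compiler builtin __atomic_compare_exchange_n as a stand-in for the machine-dependent atomic_fcmpset_32; every demo_* name is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Round an address down to its containing 32-bit word. */
#define DEMO_WORD_ALIGNED(p) \
    ((uint32_t *)((uintptr_t)(p) - ((uintptr_t)(p) % 4)))

/* Byte shift for the _BYTE_ORDER == _LITTLE_ENDIAN case above. */
#define DEMO_BYTE_SHIFT(p) (((uintptr_t)(p) % 4) * 8)

/* Stand-in for the MD atomic_fcmpset_32. */
static int
demo_fcmpset_32(volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	return (__atomic_compare_exchange_n(p, cmpval, newval, 0,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
}

/* The retry loop, mirroring _atomic_cmpset_masked_word. */
static int
demo_cmpset_masked_word(uint32_t *addr, uint32_t old, uint32_t val,
    uint32_t mask)
{
	uint32_t wcomp;
	int ret;

	wcomp = old & mask;
	do {
		/* Rebuild the expected word around the current other bits. */
		old = (*addr & ~mask) | wcomp;
		ret = demo_fcmpset_32(addr, &old, (old & ~mask) | val);
		/* Retry only if the unmasked bits caused the failure. */
	} while (ret == 0 && (old & mask) == wcomp);
	return (ret);
}

static int
demo_cmpset_8(volatile uint8_t *addr, uint8_t old, uint8_t val)
{
	int shift;

	shift = DEMO_BYTE_SHIFT(addr);
	return (demo_cmpset_masked_word(DEMO_WORD_ALIGNED(addr),
	    (uint32_t)old << shift, (uint32_t)val << shift,
	    (uint32_t)0xff << shift));
}

int
main(void)
{
	static uint32_t word = 0x44332211;	/* four independent bytes */
	volatile uint8_t *bytes = (volatile uint8_t *)&word;

	/* bytes[2] is 0x33 on little-endian; swap it for 0x77. */
	printf("0x33 -> 0x77: ret %d, word %#x\n",
	    demo_cmpset_8(&bytes[2], 0x33, 0x77), word);
	/* The stale comparand 0x33 now fails and leaves the word alone. */
	printf("0x33 -> 0x00: ret %d, word %#x\n",
	    demo_cmpset_8(&bytes[2], 0x33, 0x00), word);
	return (0);
}

On a little-endian machine this prints a successful swap (ret 1, word 0x44772211) followed by a failed one (ret 0), with the word changing only on the first call.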