sys/arm64/include/xen/synch_bitops.h — new file in this changeset.
/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Julien Grall
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef __MACHINE_XEN_SYNCH_BITOPS_H__
#define __MACHINE_XEN_SYNCH_BITOPS_H__

/*
 * Atomic compare-and-exchange on *ptr: if *ptr equals 'old', store 'new'.
 * Evaluates to a value equal to 'old' when the exchange took place.
 * Dispatches on sizeof(*(ptr)) to the fixed-width helpers below; an
 * unsupported size fails at link time via __synch_bad_size().
 */
#define synch_cmpxchg(ptr, old, new)					\
	((__typeof__(*(ptr)))__synch_cmpxchg((ptr),			\
	    (unsigned long)(old),					\
	    (unsigned long)(new),					\
	    sizeof(*(ptr))))
/*
 * 8-bit compare-and-swap with acquire semantics.
 *
 * Returns a value equal to 'old' on success and 'new' on failure, so a
 * caller can detect success by comparing the result against 'old'.
 * (When new == old the swap is a no-op either way, so returning 'new'
 * on failure is still indistinguishable from success only in the
 * harmless identity case.)
 *
 * Note: the previous implementation returned a non-atomic read of *ptr
 * taken before the cmpset; that stale value could differ from 'old'
 * even on a successful swap, spuriously signalling failure to callers.
 * A successful atomic_cmpset_acq_8() proves *ptr was exactly 'old' at
 * swap time, so return 'old' directly.  atomic_cmpset_acq_* returns 0
 * on failure.
 */
static inline uint8_t
__synch_cmpxchg_8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{

	return (atomic_cmpset_acq_8(ptr, old, new) ? old : new);
}
/*
 * 16-bit compare-and-swap with acquire semantics.
 *
 * Returns a value equal to 'old' on success and 'new' on failure; see
 * __synch_cmpxchg_8() for the full contract.  A successful
 * atomic_cmpset_acq_16() proves *ptr was exactly 'old' at swap time, so
 * return 'old' directly rather than a racy pre-read of *ptr (which
 * could differ from 'old' and spuriously signal failure).
 * atomic_cmpset_acq_* returns 0 on failure.
 */
static inline uint16_t
__synch_cmpxchg_16(volatile uint16_t *ptr, uint16_t old, uint16_t new)
{

	return (atomic_cmpset_acq_16(ptr, old, new) ? old : new);
}
/*
 * 32-bit compare-and-swap with acquire semantics.
 *
 * Returns a value equal to 'old' on success and 'new' on failure; see
 * __synch_cmpxchg_8() for the full contract.  A successful
 * atomic_cmpset_acq_32() proves *ptr was exactly 'old' at swap time, so
 * return 'old' directly rather than a racy pre-read of *ptr (which
 * could differ from 'old' and spuriously signal failure).
 * atomic_cmpset_acq_* returns 0 on failure.
 */
static inline uint32_t
__synch_cmpxchg_32(volatile uint32_t *ptr, uint32_t old, uint32_t new)
{

	return (atomic_cmpset_acq_32(ptr, old, new) ? old : new);
}
/*
 * 64-bit compare-and-swap with acquire semantics.
 *
 * Returns a value equal to 'old' on success and 'new' on failure; see
 * __synch_cmpxchg_8() for the full contract.  A successful
 * atomic_cmpset_acq_64() proves *ptr was exactly 'old' at swap time, so
 * return 'old' directly rather than a racy pre-read of *ptr (which
 * could differ from 'old' and spuriously signal failure).
 * atomic_cmpset_acq_* returns 0 on failure.
 */
static inline uint64_t
__synch_cmpxchg_64(volatile uint64_t *ptr, uint64_t old, uint64_t new)
{

	return (atomic_cmpset_acq_64(ptr, old, new) ? old : new);
}
/* Used to get a linker error if the size is not supported by __synch_cmpxchg. */
void __synch_bad_size(void);

/*
 * Size dispatcher backing the synch_cmpxchg() macro: forward the
 * compare-and-swap to the fixed-width helper matching 'size' (in
 * bytes).  Any other size calls the deliberately undefined
 * __synch_bad_size(), turning a bad sizeof(*(ptr)) into a link-time
 * failure; the trailing return merely keeps the compiler quiet.
 */
static inline unsigned long
__synch_cmpxchg(volatile void *ptr, unsigned long old,
    unsigned long new, int size)
{
	switch (size) {
	case 1:
		return (__synch_cmpxchg_8(ptr, old, new));
	case 2:
		return (__synch_cmpxchg_16(ptr, old, new));
	case 4:
		return (__synch_cmpxchg_32(ptr, old, new));
	case 8:
		return (__synch_cmpxchg_64(ptr, old, new));
	default:
		__synch_bad_size();
		return (old);
	}
}
/* | |||||
* Based on ofed/include/linux/bitops.h | |||||
*/ | |||||
#define NBPL (NBBY * sizeof(long)) | |||||
static inline bool | |||||
synch_test_bit(u_int bit, volatile long *addr) | |||||
{ | |||||
unsigned long mask = 1UL << (bit % NBPL); | |||||
return !!(atomic_load_acq_long(&addr[bit / NBPL]) & mask); | |||||
} | |||||
static inline void | |||||
synch_set_bit(u_int bit, volatile long *addr) | |||||
{ | |||||
atomic_set_long(&addr[bit / NBPL], 1UL << (bit % NBPL)); | |||||
} | |||||
static inline void | |||||
synch_clear_bit(u_int bit, volatile long *addr) | |||||
{ | |||||
atomic_clear_long(&addr[bit / NBPL], 1UL << (bit % NBPL)); | |||||
} | |||||
static inline bool | |||||
synch_test_and_clear_bit(u_int bit, long *addr) | |||||
{ | |||||
long val; | |||||
addr += bit / NBPL; | |||||
bit %= NBPL; | |||||
bit = (1UL << bit); | |||||
do { | |||||
val = *(volatile long *)addr; | |||||
} while (atomic_cmpset_long(addr, val, val & ~bit) == 0); | |||||
return !!(val & bit); | |||||
} | |||||
static inline bool | |||||
synch_test_and_set_bit(u_int bit, long *addr) | |||||
{ | |||||
long val; | |||||
addr += bit / NBPL; | |||||
bit %= NBPL; | |||||
bit = (1UL << bit); | |||||
do { | |||||
val = *(volatile long *)addr; | |||||
} while (atomic_cmpset_long(addr, val, val | bit) == 0); | |||||
return !!(val & bit); | |||||
} | |||||
#undef NBPL | |||||
#endif /* __MACHINE_XEN_SYNCH_BITOPS_H__ */ | |||||