diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h index 9b306a0cba35..711ae5e0ce38 100644 --- a/lib/libthr/thread/thr_private.h +++ b/lib/libthr/thread/thr_private.h @@ -1,738 +1,733 @@ /* * Copyright (C) 2005 Daniel M. Eischen * Copyright (c) 2005 David Xu * Copyright (c) 1995-1998 John Birrell . * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __hidden #define __hidden __attribute__((visibility("hidden"))) #endif #include "pthread_md.h" #include "thr_umtx.h" #include "thread_db.h" typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist; typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head; TAILQ_HEAD(mutex_queue, pthread_mutex); /* Signal to do cancellation */ #define SIGCANCEL 32 /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) _thread_printf(STDERR_FILENO, ##args) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (__predict_false(!(cond))) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif #ifdef PIC # define STATIC_LIB_REQUIRE(name) #else # define STATIC_LIB_REQUIRE(name) __asm (".globl " #name) #endif #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) struct pthread_mutex { /* * Lock for accesses to this structure. */ struct umutex m_lock; enum pthread_mutextype m_type; struct pthread *m_owner; int m_flags; int m_count; int m_refcount; int m_spinloops; int m_yieldloops; /* * Link for all mutexes a thread currently owns. 
*/ TAILQ_ENTRY(pthread_mutex) m_qe; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; int m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } struct pthread_cond { struct umutex c_lock; struct ucond c_kerncv; int c_pshared; int c_clockid; }; struct pthread_cond_attr { int c_pshared; int c_clockid; }; struct pthread_barrier { struct umutex b_lock; struct ucond b_cv; volatile int64_t b_cycle; volatile int b_count; volatile int b_waiters; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { struct umutex s_lock; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine)(void *args); void *routine_arg; int onstack; }; #define THR_CLEANUP_PUSH(td, func, arg) { \ struct pthread_cleanup __cup; \ \ __cup.routine = func; \ __cup.routine_arg = arg; \ __cup.onstack = 1; \ __cup.next = (td)->cleanup; \ (td)->cleanup = &__cup; #define THR_CLEANUP_POP(td, exec) \ (td)->cleanup = __cup.next; \ if ((exec) != 0) \ __cup.routine(__cup.routine_arg); \ } struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); void (*parent)(void); void (*child)(void); }; struct pthread_attr { int sched_policy; int sched_inherit; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; cpuset_t *cpuset; size_t cpusetsize; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK_DEFAULT (sizeof(void *) / 4 * 1024 * 1024) /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK_INITIAL (THR_STACK_DEFAULT * 2) /* * Define priorities returned by kernel. */ #define THR_MIN_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_min) #define THR_MAX_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_max) #define THR_DEF_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_default) #define THR_MIN_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_min) #define THR_MAX_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_max) #define THR_DEF_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_default) /* XXX The SCHED_FIFO should have the same priority range as SCHED_RR */ #define THR_MIN_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_min) #define THR_MAX_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_max) #define THR_DEF_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_default) struct pthread_prio { int pri_min; int pri_max; int pri_default; }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { - pthread_mutex_t lock; /* monitor lock */ - pthread_cond_t read_signal; - pthread_cond_t write_signal; - volatile int32_t state; - int blocked_writers; - int blocked_readers; + struct urwlock lock; struct pthread *owner; }; /* * Thread states.
*/ enum pthread_state { PS_RUNNING, PS_DEAD }; struct pthread_specific_elem { const void *data; int seqno; }; struct pthread_key { volatile int allocated; int seqno; void (*destructor)(void *); }; /* * lwpid_t is 32-bit, but the kernel thr API exported tid as a long type * in the very early days. */ #define TID(thread) ((uint32_t) ((thread)->tid)) /* * Thread structure. */ struct pthread { /* Kernel thread id. */ long tid; #define TID_TERMINATED 1 /* * Lock for accesses to this thread structure. */ struct umutex lock; /* Internal condition variable cycle number. */ long cycle; /* How many low-level locks the thread holds. */ int locklevel; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* Signal blocked counter. */ int sigblock; /* Queue entry for list of all threads. */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ /* Queue entry for GC lists. */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry. */ LIST_ENTRY(pthread) hle; /* Thread's reference count. */ int refcount; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; #define SHOULD_CANCEL(thr) \ ((thr)->cancel_pending && \ ((thr)->cancel_point || (thr)->cancel_async) && \ (thr)->cancel_enable && (thr)->cancelling == 0) /* Cancellation is enabled */ int cancel_enable; /* Cancellation request is pending */ int cancel_pending; /* Thread is at cancellation point */ int cancel_point; /* Cancellation should be synchronized */ int cancel_defer; /* Asynchronous cancellation is enabled */ int cancel_async; /* Cancellation is in progress */ int cancelling; /* Thread temporary signal mask. */ sigset_t sigmask; /* Thread is in SIGCANCEL handler. */ int in_sigcancel_handler; /* New thread should unblock SIGCANCEL. */ int unblock_sigcancel; /* Force new thread to exit. */ int force_exit; /* Thread state: */ enum pthread_state state; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */ #define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */ /* Thread list flags; only set with thread list lock held. */ int tlflags; #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ #define TLFLAGS_DETACHED 0x0008 /* thread is detached */ /* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */ struct mutex_queue mutexq; /* Queue of all owned PRIO_PROTECT mutexes. */ struct mutex_queue pp_mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Number of rwlock read locks held. */ int rdlock_count; /* * Current locks bitmap for rtld.
*/ int rtld_bits; /* Thread control block */ struct tcb *tcb; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; /* Enable event reporting */ int report_events; /* Event mask */ int event_mask; /* Event */ td_event_msg_t event_buf; }; #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_CRITICAL_ENTER(thrd) \ (thrd)->critical_count++ #define THR_CRITICAL_LEAVE(thrd) \ (thrd)->critical_count--; \ _thr_ast(thrd); #define THR_UMUTEX_TRYLOCK(thrd, lck) \ _thr_umutex_trylock((lck), TID(thrd)) #define THR_UMUTEX_LOCK(thrd, lck) \ _thr_umutex_lock((lck), TID(thrd)) #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) #define THR_UMUTEX_UNLOCK(thrd, lck) \ _thr_umutex_unlock((lck), TID(thrd)) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock(lck, TID(thrd)); \ } while (0) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT_LOCKLEVEL(thrd) \ do { \ if (__predict_false((thrd)->locklevel <= 0)) \ _thr_assert_lock_level(); \ } while (0) #else #define THR_ASSERT_LOCKLEVEL(thrd) #endif #define THR_LOCK_RELEASE(thrd, lck) \ do { \ THR_ASSERT_LOCKLEVEL(thrd); \ _thr_umutex_unlock((lck), TID(thrd)); \ (thrd)->locklevel--; \ _thr_ast(thrd); \ } while (0) #define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock) #define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) #define THREAD_LIST_LOCK(curthrd) \ do { \ THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock); \ } while (0) #define THREAD_LIST_UNLOCK(curthrd) \ do { \ THR_LOCK_RELEASE((curthrd), &_thr_list_lock); \ } while (0) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define GC_NEEDED() (_gc_count >= 5) #define SHOULD_REPORT_EVENT(curthr, e) \ (curthr->report_events && \ (((curthr)->event_mask | _thread_event_mask ) & e) != 0) extern int __isthreaded; /* * Global variables for the pthread kernel. 
*/ extern char *_usrstack __hidden; extern struct pthread *_thr_initial __hidden; /* For debugger */ extern int _libthr_debug; extern int _thread_event_mask; extern struct pthread *_thread_last_event; /* List of all threads: */ extern pthreadlist _thread_list; /* List of threads needing GC: */ extern pthreadlist _thread_gc_list __hidden; extern int _thread_active_threads; extern atfork_head _thr_atfork_list __hidden; extern struct umutex _thr_atfork_lock __hidden; /* Default thread attributes: */ extern struct pthread_attr _pthread_attr_default __hidden; /* Default mutex attributes: */ extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden; /* Default condition variable attributes: */ extern struct pthread_cond_attr _pthread_condattr_default __hidden; extern struct pthread_prio _thr_priorities[] __hidden; extern pid_t _thr_pid __hidden; extern int _thr_is_smp __hidden; extern size_t _thr_guard_default __hidden; extern size_t _thr_stack_default __hidden; extern size_t _thr_stack_initial __hidden; extern int _thr_page_size __hidden; extern int _thr_spinloops __hidden; extern int _thr_yieldloops __hidden; /* Garbage thread count. */ extern int _gc_count __hidden; extern struct umutex _mutex_static_lock __hidden; extern struct umutex _cond_static_lock __hidden; extern struct umutex _rwlock_static_lock __hidden; extern struct umutex _keytable_lock __hidden; extern struct umutex _thr_list_lock __hidden; extern struct umutex _thr_event_lock __hidden; /* * Function prototype definitions. */ __BEGIN_DECLS int _thr_setthreaded(int) __hidden; int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden; int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; void _mutex_unlock_private(struct pthread *) __hidden; void _libpthread_init(struct pthread *) __hidden; struct pthread *_thr_alloc(struct pthread *) __hidden; void _thread_exit(const char *, int, const char *) __hidden __dead2; void _thr_exit_cleanup(void) __hidden; int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden; void _thr_ref_delete(struct pthread *, struct pthread *) __hidden; void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden; int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden; void _thr_rtld_init(void) __hidden; void _thr_rtld_fini(void) __hidden; int _thr_stack_alloc(struct pthread_attr *) __hidden; void _thr_stack_free(struct pthread_attr *) __hidden; void _thr_free(struct pthread *, struct pthread *) __hidden; void _thr_gc(struct pthread *) __hidden; void _thread_cleanupspecific(void) __hidden; void _thread_dump_info(void) __hidden; void _thread_printf(int, const char *, ...) 
__hidden; void _thr_spinlock_init(void) __hidden; void _thr_cancel_enter(struct pthread *) __hidden; void _thr_cancel_leave(struct pthread *) __hidden; void _thr_cancel_enter_defer(struct pthread *) __hidden; void _thr_cancel_leave_defer(struct pthread *, int) __hidden; void _thr_testcancel(struct pthread *) __hidden; void _thr_signal_block(struct pthread *) __hidden; void _thr_signal_unblock(struct pthread *) __hidden; void _thr_signal_init(void) __hidden; void _thr_signal_deinit(void) __hidden; int _thr_send_sig(struct pthread *, int sig) __hidden; void _thr_list_init(void) __hidden; void _thr_hash_add(struct pthread *) __hidden; void _thr_hash_remove(struct pthread *) __hidden; struct pthread *_thr_hash_find(struct pthread *) __hidden; void _thr_link(struct pthread *, struct pthread *) __hidden; void _thr_unlink(struct pthread *, struct pthread *) __hidden; void _thr_suspend_check(struct pthread *) __hidden; void _thr_assert_lock_level(void) __hidden __dead2; void _thr_ast(struct pthread *) __hidden; void _thr_once_init(void) __hidden; void _thr_report_creation(struct pthread *curthread, struct pthread *newthread) __hidden; void _thr_report_death(struct pthread *curthread) __hidden; int _thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden; int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden; int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, struct rtprio *rtp) __hidden; void _thread_bp_create(void); void _thread_bp_death(void); int _sched_yield(void); int _pthread_getaffinity_np(pthread_t, size_t, cpuset_t *); int _pthread_setaffinity_np(pthread_t, size_t, const cpuset_t *); int _pthread_cond_wait_unlocked(pthread_cond_t *, pthread_mutex_t *, const struct timespec *) __hidden; int _pthread_cond_broadcast_unlock(pthread_cond_t *, pthread_mutex_t *, int broadcast); /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigwait(const sigset_t *, int *); int __sys_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *); int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_fork(void); pid_t __sys_getpid(void); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif static inline int _thr_isthreaded(void) { return (__isthreaded != 0); } static inline int _thr_is_inited(void) { return (_thr_initial != NULL); } static inline void _thr_check_init(void) { if (_thr_initial == NULL) _libpthread_init(NULL); } __END_DECLS #endif /* !_THR_PRIVATE_H */ diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c index d52454f1f7ec..2ed8af87c802 100644 --- a/lib/libthr/thread/thr_rwlock.c +++ b/lib/libthr/thread/thr_rwlock.c @@ -1,503 +1,379 @@ /*- * Copyright (c) 1998 Alex Nash * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include "namespace.h" #include +#include #include "un-namespace.h" #include "thr_private.h" -#define RWLOCK_WRITE_OWNER 0x80000000U -#define RWLOCK_WRITE_WAITERS 0x40000000U -#define RWLOCK_READ_WAITERS 0x20000000U -#define RWLOCK_MAX_READERS 0x1fffffffU -#define RWLOCK_READER_COUNT(c) ((c) & RWLOCK_MAX_READERS) - __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy); __weak_reference(_pthread_rwlock_init, pthread_rwlock_init); __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock); __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock); __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock); __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock); __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock); __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock); __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); /* * Prototypes */ static int rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused) { pthread_rwlock_t prwlock; - int ret; - - /* allocate rwlock object */ - prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock)); + prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock)); if (prwlock == NULL) return (ENOMEM); - - /* initialize the lock */ - if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0) - free(prwlock); - else { - /* initialize the read condition signal */ - ret = _pthread_cond_init(&prwlock->read_signal, NULL); - - if (ret != 0) { - _pthread_mutex_destroy(&prwlock->lock); - free(prwlock); - } else { - /* initialize the write condition signal */ - ret = _pthread_cond_init(&prwlock->write_signal, NULL); - - if (ret != 0) { - _pthread_cond_destroy(&prwlock->read_signal); - _pthread_mutex_destroy(&prwlock->lock); - free(prwlock); - } else { - /* success */ - prwlock->state = 0; - prwlock->blocked_readers = 0; - prwlock->blocked_writers = 0; - prwlock->owner = NULL; - *rwlock = prwlock; - } - } - } - - return (ret); + *rwlock = prwlock; + return (0); } int _pthread_rwlock_destroy (pthread_rwlock_t *rwlock) { int ret; if (rwlock == NULL) ret = EINVAL; else { pthread_rwlock_t prwlock; prwlock = *rwlock; *rwlock = NULL; - 
_pthread_mutex_destroy(&prwlock->lock); - _pthread_cond_destroy(&prwlock->read_signal); - _pthread_cond_destroy(&prwlock->write_signal); free(prwlock); - ret = 0; } return (ret); } static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock) { int ret; THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); if (*rwlock == NULL) ret = rwlock_init(rwlock, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_rwlock_static_lock); return (ret); } int _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { *rwlock = NULL; return (rwlock_init(rwlock, attr)); } -static inline int -rwlock_tryrdlock(struct pthread_rwlock *prwlock, int prefer_reader) -{ - int32_t state; - int32_t wrflags; - - if (prefer_reader) - wrflags = RWLOCK_WRITE_OWNER; - else - wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS; - state = prwlock->state; - while (!(state & wrflags)) { - if (__predict_false(RWLOCK_READER_COUNT(state) == RWLOCK_MAX_READERS)) - return (EAGAIN); - if (atomic_cmpset_acq_32(&prwlock->state, state, state + 1)) - return (0); - CPU_SPINWAIT; - state = prwlock->state; - } - - return (EBUSY); -} - static int rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); - const int prefer_read = curthread->rdlock_count > 0; pthread_rwlock_t prwlock; - int ret, wrflags, old; - int32_t state; + struct timespec ts, ts2, *tsp; + int flags; + int ret; if (__predict_false(rwlock == NULL)) return (EINVAL); prwlock = *rwlock; /* check for static initialization */ if (__predict_false(prwlock == NULL)) { if ((ret = init_static(curthread, rwlock)) != 0) return (ret); prwlock = *rwlock; } - /* - * POSIX said the validity of the abstimeout parameter need - * not be checked if the lock can be immediately acquired. - */ - ret = rwlock_tryrdlock(prwlock, prefer_read); - if (ret == 0) { - curthread->rdlock_count++; - return (ret); - } - if (__predict_false(ret == EAGAIN)) - return (ret); - - if (__predict_false(abstime && - (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) - return (EINVAL); - - if (prefer_read) { + if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by * a thread or all of the threads that hold a rdlock, * we keep a simple count of all the rdlocks held by * a thread. If a thread holds any rdlocks it is * possible that it is attempting to take a recursive * rdlock. If there are blocked writers and precedence * is given to them, then that would result in the thread * deadlocking. So allowing a thread to take the rdlock * when it already has one or more rdlocks avoids the * deadlock. I hope the reader can follow that logic ;-) */ + flags = URWLOCK_PREFER_READER; + } else { + flags = 0; + } - wrflags = RWLOCK_WRITE_OWNER; - } else - wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS; + /* + * POSIX said the validity of the abstimeout parameter need + * not be checked if the lock can be immediately acquired. 
+ */ + ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); + if (ret == 0) { + curthread->rdlock_count++; + return (ret); + } - /* reset to zero */ - ret = 0; - for (;;) { - _pthread_mutex_lock(&prwlock->lock); - state = prwlock->state; - /* set read contention bit */ - while ((state & wrflags) && !(state & RWLOCK_READ_WAITERS)) { - if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_READ_WAITERS)) - break; - CPU_SPINWAIT; - state = prwlock->state; - } - - atomic_add_32(&prwlock->blocked_readers, 1); - if (state & wrflags) { - ret = _pthread_cond_wait_unlocked(&prwlock->read_signal, &prwlock->lock, abstime); - old = atomic_fetchadd_32(&prwlock->blocked_readers, -1); - if (old == 1) - _pthread_mutex_lock(&prwlock->lock); - else - goto try_it; - } else { - atomic_subtract_32(&prwlock->blocked_readers, 1); - } + if (__predict_false(abstime && + (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) + return (EINVAL); - if (prwlock->blocked_readers == 0) - atomic_clear_32(&prwlock->state, RWLOCK_READ_WAITERS); - _pthread_mutex_unlock(&prwlock->lock); + for (;;) { + if (abstime) { + clock_gettime(CLOCK_REALTIME, &ts); + TIMESPEC_SUB(&ts2, abstime, &ts); + if (ts2.tv_sec < 0 || + (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) + return (ETIMEDOUT); + tsp = &ts2; + } else + tsp = NULL; + + /* goto kernel and lock it */ + ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp); + if (ret != EINTR) + break; -try_it: - /* try to lock it again. */ - if (rwlock_tryrdlock(prwlock, prefer_read) == 0) { - curthread->rdlock_count++; + /* if interrupted, try to lock it in userland again. */ + if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) { ret = 0; + curthread->rdlock_count++; break; } - - if (ret) - break; } return (ret); } int _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { return (rwlock_rdlock_common(rwlock, NULL)); } int _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { return (rwlock_rdlock_common(rwlock, abstime)); } int _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; + int flags; int ret; if (__predict_false(rwlock == NULL)) return (EINVAL); prwlock = *rwlock; /* check for static initialization */ if (__predict_false(prwlock == NULL)) { if ((ret = init_static(curthread, rwlock)) != 0) return (ret); prwlock = *rwlock; } - ret = rwlock_tryrdlock(prwlock, curthread->rdlock_count > 0); + if (curthread->rdlock_count) { + /* + * To avoid having to track all the rdlocks held by + * a thread or all of the threads that hold a rdlock, + * we keep a simple count of all the rdlocks held by + * a thread. If a thread holds any rdlocks it is + * possible that it is attempting to take a recursive + * rdlock. If there are blocked writers and precedence + * is given to them, then that would result in the thread + * deadlocking. So allowing a thread to take the rdlock + * when it already has one or more rdlocks avoids the + * deadlock. 
I hope the reader can follow that logic ;-) + */ + flags = URWLOCK_PREFER_READER; + } else { + flags = 0; + } + + ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); if (ret == 0) curthread->rdlock_count++; return (ret); } -static inline int -rwlock_trywrlock(struct pthread_rwlock *prwlock) -{ - int32_t state; - - state = prwlock->state; - while (!(state & RWLOCK_WRITE_OWNER) && RWLOCK_READER_COUNT(state) == 0) { - if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_OWNER)) - return (0); - CPU_SPINWAIT; - state = prwlock->state; - } - return (EBUSY); -} - int _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; if (__predict_false(rwlock == NULL)) return (EINVAL); prwlock = *rwlock; /* check for static initialization */ if (__predict_false(prwlock == NULL)) { if ((ret = init_static(curthread, rwlock)) != 0) return (ret); prwlock = *rwlock; } - ret = rwlock_trywrlock(prwlock); + ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) prwlock->owner = curthread; return (ret); } static int rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; + struct timespec ts, ts2, *tsp; int ret; - int32_t state; if (__predict_false(rwlock == NULL)) return (EINVAL); prwlock = *rwlock; /* check for static initialization */ if (__predict_false(prwlock == NULL)) { if ((ret = init_static(curthread, rwlock)) != 0) return (ret); prwlock = *rwlock; } /* * POSIX said the validity of the abstimeout parameter need * not be checked if the lock can be immediately acquired. */ - - /* try to lock it in userland */ - ret = rwlock_trywrlock(prwlock); + ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) { prwlock->owner = curthread; return (ret); } if (__predict_false(abstime && (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) return (EINVAL); - /* reset to zero */ - ret = 0; - for (;;) { - _pthread_mutex_lock(&prwlock->lock); - state = prwlock->state; - while (((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) && - (state & RWLOCK_WRITE_WAITERS) == 0) { - if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_WAITERS)) - break; - CPU_SPINWAIT; - state = prwlock->state; - } - - prwlock->blocked_writers++; - - while ((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) { - if (abstime == NULL) - ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock); - else - ret = _pthread_cond_timedwait(&prwlock->write_signal, &prwlock->lock, abstime); - - if (ret) - break; - state = prwlock->state; + if (abstime != NULL) { + clock_gettime(CLOCK_REALTIME, &ts); + TIMESPEC_SUB(&ts2, abstime, &ts); + if (ts2.tv_sec < 0 || + (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) + return (ETIMEDOUT); + tsp = &ts2; + } else + tsp = NULL; + + /* goto kernel and lock it */ + ret = __thr_rwlock_wrlock(&prwlock->lock, tsp); + if (ret == 0) { + prwlock->owner = curthread; + break; } - prwlock->blocked_writers--; - if (prwlock->blocked_writers == 0) - atomic_clear_32(&prwlock->state, RWLOCK_WRITE_WAITERS); - _pthread_mutex_unlock(&prwlock->lock); + if (ret != EINTR) + break; - if (rwlock_trywrlock(prwlock) == 0) { - prwlock->owner = curthread; + /* if interrupted, try to lock it in userland again. 
*/ + if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) { ret = 0; + prwlock->owner = curthread; break; } - - if (ret) - break; } return (ret); } int _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) { return (rwlock_wrlock_common (rwlock, NULL)); } int _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { return (rwlock_wrlock_common (rwlock, abstime)); } int _pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; + int ret; int32_t state; if (__predict_false(rwlock == NULL)) return (EINVAL); prwlock = *rwlock; if (__predict_false(prwlock == NULL)) return (EINVAL); - state = prwlock->state; - - if (state & RWLOCK_WRITE_OWNER) { + state = prwlock->lock.rw_state; + if (state & URWLOCK_WRITE_OWNER) { if (__predict_false(prwlock->owner != curthread)) return (EPERM); prwlock->owner = NULL; - while (!atomic_cmpset_rel_32(&prwlock->state, state, state & ~RWLOCK_WRITE_OWNER)) { - CPU_SPINWAIT; - state = prwlock->state; - } - } else if (RWLOCK_READER_COUNT(state) != 0) { - while (!atomic_cmpset_rel_32(&prwlock->state, state, state - 1)) { - CPU_SPINWAIT; - state = prwlock->state; - if (RWLOCK_READER_COUNT(state) == 0) - return (EPERM); - } - curthread->rdlock_count--; - } else { - return (EPERM); } -#if 1 - if (state & RWLOCK_WRITE_WAITERS) { - _pthread_mutex_lock(&prwlock->lock); - _pthread_cond_signal(&prwlock->write_signal); - _pthread_mutex_unlock(&prwlock->lock); - } else if (state & RWLOCK_READ_WAITERS) { - _pthread_mutex_lock(&prwlock->lock); - _pthread_cond_broadcast(&prwlock->read_signal); - _pthread_mutex_unlock(&prwlock->lock); - } -#else - - if (state & RWLOCK_WRITE_WAITERS) { - _pthread_mutex_lock(&prwlock->lock); - _pthread_cond_broadcast_unlock(&prwlock->write_signal, &prwlock->lock, 0); - } else if (state & RWLOCK_READ_WAITERS) { - _pthread_mutex_lock(&prwlock->lock); - _pthread_cond_broadcast_unlock(&prwlock->write_signal, &prwlock->lock, 1); - } -#endif - return (0); + ret = _thr_rwlock_unlock(&prwlock->lock); + if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0) + curthread->rdlock_count--; + + return (ret); } diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c index 058205b52731..d0a225ddbb3c 100644 --- a/lib/libthr/thread/thr_umtx.c +++ b/lib/libthr/thread/thr_umtx.c @@ -1,161 +1,185 @@ /* * Copyright (c) 2005 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include "thr_private.h" #include "thr_umtx.h" void _thr_umutex_init(struct umutex *mtx) { static struct umutex default_mtx = DEFAULT_UMUTEX; *mtx = default_mtx; } int __thr_umutex_lock(struct umutex *mtx) { if (_umtx_op(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0) != -1) return 0; return (errno); } int __thr_umutex_timedlock(struct umutex *mtx, const struct timespec *timeout) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) return (ETIMEDOUT); if (_umtx_op(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, __DECONST(void *, timeout)) != -1) return (0); return (errno); } int __thr_umutex_unlock(struct umutex *mtx) { if (_umtx_op(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0) != -1) return (0); return (errno); } int __thr_umutex_trylock(struct umutex *mtx) { if (_umtx_op(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0) != -1) return (0); return (errno); } int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) { if (_umtx_op(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0) != -1) return (0); return (errno); } int _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) return (ETIMEDOUT); if (_umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, __DECONST(void*, timeout)) != -1) return (0); return (errno); } int _thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) return (ETIMEDOUT); if (_umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT_UINT, id, 0, __DECONST(void*, timeout)) != -1) return (0); return (errno); } int _thr_umtx_wake(volatile void *mtx, int nr_wakeup) { if (_umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAKE, nr_wakeup, 0, 0) != -1) return (0); return (errno); } void _thr_ucond_init(struct ucond *cv) { bzero(cv, sizeof(struct ucond)); } int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int check_unparking) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) { __thr_umutex_unlock(m); return (ETIMEDOUT); } if (_umtx_op(cv, UMTX_OP_CV_WAIT, check_unparking ? 
UMTX_CHECK_UNPARKING : 0, m, __DECONST(void*, timeout)) != -1) { return (0); } return (errno); } int _thr_ucond_signal(struct ucond *cv) { if (!cv->c_has_waiters) return (0); if (_umtx_op(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL) != -1) return (0); return (errno); } int _thr_ucond_broadcast(struct ucond *cv) { if (!cv->c_has_waiters) return (0); if (_umtx_op(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL) != -1) return (0); return (errno); } + +int +__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) +{ + if (_umtx_op(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp) != -1) + return (0); + return (errno); +} + +int +__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) +{ + if (_umtx_op(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp) != -1) + return (0); + return (errno); +} + +int +__thr_rwlock_unlock(struct urwlock *rwlock) +{ + if (_umtx_op(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL) != -1) + return (0); + return (errno); +} diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h index 65d00f281cba..752e7b2fe4c9 100644 --- a/lib/libthr/thread/thr_umtx.h +++ b/lib/libthr/thread/thr_umtx.h @@ -1,100 +1,181 @@ /*- * Copyright (c) 2005 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _THR_FBSD_UMTX_H_ #define _THR_FBSD_UMTX_H_ #include #include #define DEFAULT_UMUTEX {0,0, {0,0},{0,0,0,0}} int __thr_umutex_lock(struct umutex *mtx) __hidden; int __thr_umutex_timedlock(struct umutex *mtx, const struct timespec *timeout) __hidden; int __thr_umutex_unlock(struct umutex *mtx) __hidden; int __thr_umutex_trylock(struct umutex *mtx) __hidden; int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) __hidden; void _thr_umutex_init(struct umutex *mtx) __hidden; int _thr_umtx_wait(volatile long *mtx, long exp, const struct timespec *timeout) __hidden; int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp, const struct timespec *timeout) __hidden; int _thr_umtx_wake(volatile void *mtx, int count) __hidden; int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int check_unpaking) __hidden; void _thr_ucond_init(struct ucond *cv) __hidden; int _thr_ucond_signal(struct ucond *cv) __hidden; int _thr_ucond_broadcast(struct ucond *cv) __hidden; +int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) __hidden; +int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden; +int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden; + static inline int _thr_umutex_trylock(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EBUSY); return (__thr_umutex_trylock(mtx)); } static inline int _thr_umutex_trylock2(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); return (EBUSY); } static inline int _thr_umutex_lock(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); return (__thr_umutex_lock(mtx)); } static inline int _thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *timeout) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); return (__thr_umutex_timedlock(mtx, timeout)); } static inline int _thr_umutex_unlock(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED)) return (0); return (__thr_umutex_unlock(mtx)); } +static inline int +_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags) +{ + int32_t state; + int32_t wrflags; + + if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER) + wrflags = URWLOCK_WRITE_OWNER; + else + wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS; + state = rwlock->rw_state; + while (!(state & wrflags)) { + if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) + return (EAGAIN); + if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1)) + return (0); + state = rwlock->rw_state; + } + + return (EBUSY); +} + +static inline int +_thr_rwlock_trywrlock(struct urwlock *rwlock) +{ + int32_t state; + + state = rwlock->rw_state; + while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) { + if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER)) + return (0); + state = rwlock->rw_state; + } + + return (EBUSY); +} + +static inline int +_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) +{ + if (_thr_rwlock_tryrdlock(rwlock, flags) == 0) + return (0); + return (__thr_rwlock_rdlock(rwlock, flags, tsp)); +} + +static inline int +_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) +{ + if 
(_thr_rwlock_trywrlock(rwlock) == 0) + return (0); + return (__thr_rwlock_wrlock(rwlock, tsp)); +} + +static inline int +_thr_rwlock_unlock(struct urwlock *rwlock) +{ + int32_t state; + + state = rwlock->rw_state; + if (state & URWLOCK_WRITE_OWNER) { + if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0)) + return (0); + } else { + for (;;) { + if (__predict_false(URWLOCK_READER_COUNT(state) == 0)) + return (EPERM); + if (!((state & URWLOCK_WRITE_WAITERS) && URWLOCK_READER_COUNT(state) == 1)) { + if (atomic_cmpset_rel_32(&rwlock->rw_state, state, state-1)) + return (0); + state = rwlock->rw_state; + } else { + break; + } + } + } + return (__thr_rwlock_unlock(rwlock)); +} #endif
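
Below is a minimal, hypothetical usage sketch (not part of the patch) showing the caller-visible behaviour of the reworked rwlock. pthread_rwlock_timedrdlock() still takes an absolute CLOCK_REALTIME deadline, which rwlock_rdlock_common() converts to a relative timeout with TIMESPEC_SUB before entering the kernel via __thr_rwlock_rdlock(); a thread that already holds a read lock may take another one even with a writer queued, because rdlock_count > 0 selects the URWLOCK_PREFER_READER path. The two-second deadline and the writer thread body are illustrative only.

/*
 * Usage sketch for the urwlock-backed pthread rwlock (assumed example,
 * not part of this change).
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_rwlock_t rw;

static void *
writer(void *arg)
{
	(void)arg;
	/* Tries the userland fast path first, then sleeps in the kernel. */
	pthread_rwlock_wrlock(&rw);
	/* ... update shared data ... */
	pthread_rwlock_unlock(&rw);
	return (NULL);
}

int
main(void)
{
	struct timespec abstime;
	pthread_t td;
	int error;

	pthread_rwlock_init(&rw, NULL);
	pthread_create(&td, NULL, writer, NULL);

	/* Absolute deadline, two seconds from now on CLOCK_REALTIME. */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;

	error = pthread_rwlock_timedrdlock(&rw, &abstime);
	if (error == ETIMEDOUT) {
		fprintf(stderr, "read lock not acquired before deadline\n");
	} else if (error == 0) {
		/*
		 * A second rdlock from the same thread succeeds even if a
		 * writer is waiting; the library prefers readers for such
		 * threads to avoid self-deadlock on recursive read locks.
		 */
		if (pthread_rwlock_rdlock(&rw) == 0)
			pthread_rwlock_unlock(&rw);
		pthread_rwlock_unlock(&rw);
	}

	pthread_join(td, NULL);
	pthread_rwlock_destroy(&rw);
	return (0);
}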