Index: head/lib/libthr/thread/thr_barrier.c
===================================================================
--- head/lib/libthr/thread/thr_barrier.c	(revision 297700)
+++ head/lib/libthr/thread/thr_barrier.c	(revision 297701)
@@ -1,168 +1,171 @@
/*-
 * Copyright (c) 2003 David Xu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "un-namespace.h"

#include "thr_private.h"

+_Static_assert(sizeof(struct pthread_barrier) <= PAGE_SIZE,
+    "pthread_barrier is too large for off-page");
+
__weak_reference(_pthread_barrier_init, pthread_barrier_init);
__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);

int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
	pthread_barrier_t bar;
	struct pthread *curthread;
	int pshared;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);
	if (*barrier == THR_PSHARED_PTR) {
		bar = __thr_pshared_offpage(barrier, 0);
		if (bar == NULL) {
			*barrier = NULL;
			return (0);
		}
		pshared = 1;
	} else {
		bar = *barrier;
		pshared = 0;
	}
	curthread = _get_curthread();
	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
	if (bar->b_destroying) {
		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
		return (EBUSY);
	}
	bar->b_destroying = 1;
	do {
		if (bar->b_waiters > 0) {
			bar->b_destroying = 0;
			THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
			return (EBUSY);
		}
		if (bar->b_refcount != 0) {
			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
		} else
			break;
	} while (1);
	bar->b_destroying = 0;
	THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);

	*barrier = NULL;
	if (pshared)
		__thr_pshared_destroy(barrier);
	else
		free(bar);
	return (0);
}

int
_pthread_barrier_init(pthread_barrier_t *barrier,
    const pthread_barrierattr_t *attr, unsigned count)
{
	pthread_barrier_t bar;
	int pshared;

	if (barrier == NULL || count <= 0)
		return (EINVAL);

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		bar = calloc(1, sizeof(struct pthread_barrier));
		if (bar == NULL)
			return (ENOMEM);
		*barrier = bar;
		pshared = 0;
	} else {
		bar = __thr_pshared_offpage(barrier, 1);
		if (bar == NULL)
			return (EFAULT);
		*barrier = THR_PSHARED_PTR;
		pshared = 1;
	}

	_thr_umutex_init(&bar->b_lock);
	_thr_ucond_init(&bar->b_cv);
	if (pshared) {
bar->b_lock.m_flags |= USYNC_PROCESS_SHARED; bar->b_cv.c_flags |= USYNC_PROCESS_SHARED; } bar->b_count = count; return (0); } int _pthread_barrier_wait(pthread_barrier_t *barrier) { struct pthread *curthread; pthread_barrier_t bar; int64_t cycle; int ret; if (barrier == NULL || *barrier == NULL) return (EINVAL); if (*barrier == THR_PSHARED_PTR) { bar = __thr_pshared_offpage(barrier, 0); if (bar == NULL) return (EINVAL); } else { bar = *barrier; } curthread = _get_curthread(); THR_UMUTEX_LOCK(curthread, &bar->b_lock); if (++bar->b_waiters == bar->b_count) { /* Current thread is lastest thread */ bar->b_waiters = 0; bar->b_cycle++; _thr_ucond_broadcast(&bar->b_cv); THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); ret = PTHREAD_BARRIER_SERIAL_THREAD; } else { cycle = bar->b_cycle; bar->b_refcount++; do { _thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0); THR_UMUTEX_LOCK(curthread, &bar->b_lock); /* test cycle to avoid bogus wakeup */ } while (cycle == bar->b_cycle); if (--bar->b_refcount == 0 && bar->b_destroying) _thr_ucond_broadcast(&bar->b_cv); THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); ret = 0; } return (ret); } Index: head/lib/libthr/thread/thr_cond.c =================================================================== --- head/lib/libthr/thread/thr_cond.c (revision 297700) +++ head/lib/libthr/thread/thr_cond.c (revision 297701) @@ -1,519 +1,522 @@ /* * Copyright (c) 2005 David Xu * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" +_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE, + "pthread_cond too large"); + /* * Prototypes */ int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex); int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec * abstime); static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr); static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel); static int cond_signal_common(pthread_cond_t *cond); static int cond_broadcast_common(pthread_cond_t *cond); /* * Double underscore versions are cancellation points. Single underscore * versions are not and are provided for libc internal usage (which * shouldn't introduce cancellation points). */ __weak_reference(__pthread_cond_wait, pthread_cond_wait); __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_pthread_cond_init, pthread_cond_init); __weak_reference(_pthread_cond_destroy, pthread_cond_destroy); __weak_reference(_pthread_cond_signal, pthread_cond_signal); __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast); #define CV_PSHARED(cvp) (((cvp)->__flags & USYNC_PROCESS_SHARED) != 0) static void cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr) { if (cattr == NULL) { cvp->__clock_id = CLOCK_REALTIME; } else { if (cattr->c_pshared) cvp->__flags |= USYNC_PROCESS_SHARED; cvp->__clock_id = cattr->c_clockid; } } static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { struct pthread_cond *cvp; const struct pthread_cond_attr *cattr; int pshared; cattr = cond_attr != NULL ? *cond_attr : NULL; if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) { pshared = 0; cvp = calloc(1, sizeof(struct pthread_cond)); if (cvp == NULL) return (ENOMEM); } else { pshared = 1; cvp = __thr_pshared_offpage(cond, 1); if (cvp == NULL) return (EFAULT); } /* * Initialise the condition variable structure: */ cond_init_body(cvp, cattr); *cond = pshared ? 
THR_PSHARED_PTR : cvp; return (0); } static int init_static(struct pthread *thread, pthread_cond_t *cond) { int ret; THR_LOCK_ACQUIRE(thread, &_cond_static_lock); if (*cond == NULL) ret = cond_init(cond, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_cond_static_lock); return (ret); } #define CHECK_AND_INIT_COND \ if (*cond == THR_PSHARED_PTR) { \ cvp = __thr_pshared_offpage(cond, 0); \ if (cvp == NULL) \ return (EINVAL); \ } else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \ if (cvp == THR_COND_INITIALIZER) { \ int ret; \ ret = init_static(_get_curthread(), cond); \ if (ret) \ return (ret); \ } else if (cvp == THR_COND_DESTROYED) { \ return (EINVAL); \ } \ cvp = *cond; \ } int _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { *cond = NULL; return (cond_init(cond, cond_attr)); } int _pthread_cond_destroy(pthread_cond_t *cond) { struct pthread_cond *cvp; int error; error = 0; if (*cond == THR_PSHARED_PTR) { cvp = __thr_pshared_offpage(cond, 0); if (cvp != NULL) __thr_pshared_destroy(cond); *cond = THR_COND_DESTROYED; } else if ((cvp = *cond) == THR_COND_INITIALIZER) { /* nothing */ } else if (cvp == THR_COND_DESTROYED) { error = EINVAL; } else { cvp = *cond; *cond = THR_COND_DESTROYED; free(cvp); } return (error); } /* * Cancellation behavior: * Thread may be canceled at start, if thread is canceled, it means it * did not get a wakeup from pthread_cond_signal(), otherwise, it is * not canceled. * Thread cancellation never cause wakeup from pthread_cond_signal() * to be lost. */ static int cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); int recurse; int error, error2 = 0; error = _mutex_cv_detach(mp, &recurse); if (error != 0) return (error); if (cancel) { _thr_cancel_enter2(curthread, 0); error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME|CVWAIT_CLOCKID); _thr_cancel_leave(curthread, 0); } else { error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME|CVWAIT_CLOCKID); } /* * Note that PP mutex and ROBUST mutex may return * interesting error codes. */ if (error == 0) { error2 = _mutex_cv_lock(mp, recurse); } else if (error == EINTR || error == ETIMEDOUT) { error2 = _mutex_cv_lock(mp, recurse); if (error2 == 0 && cancel) _thr_testcancel(curthread); if (error == EINTR) error = 0; } else { /* We know that it didn't unlock the mutex. */ error2 = _mutex_cv_attach(mp, recurse); if (error2 == 0 && cancel) _thr_testcancel(curthread); } return (error2 != 0 ? error2 : error); } /* * Thread waits in userland queue whenever possible, when thread * is signaled or broadcasted, it is removed from the queue, and * is saved in curthread's defer_waiters[] buffer, but won't be * woken up until mutex is unlocked. */ static int cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); struct sleepqueue *sq; int recurse; int error; int defered; if (curthread->wchan != NULL) PANIC("thread was already on queue."); if (cancel) _thr_testcancel(curthread); _sleepq_lock(cvp); /* * set __has_user_waiters before unlocking mutex, this allows * us to check it without locking in pthread_cond_signal(). 
*/ cvp->__has_user_waiters = 1; defered = 0; (void)_mutex_cv_unlock(mp, &recurse, &defered); curthread->mutex_obj = mp; _sleepq_add(cvp, curthread); for(;;) { _thr_clear_wake(curthread); _sleepq_unlock(cvp); if (defered) { defered = 0; if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0) (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags, 0, 0); } if (curthread->nwaiter_defer > 0) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } if (cancel) { _thr_cancel_enter2(curthread, 0); error = _thr_sleep(curthread, cvp->__clock_id, abstime); _thr_cancel_leave(curthread, 0); } else { error = _thr_sleep(curthread, cvp->__clock_id, abstime); } _sleepq_lock(cvp); if (curthread->wchan == NULL) { error = 0; break; } else if (cancel && SHOULD_CANCEL(curthread)) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); _sleepq_unlock(cvp); curthread->mutex_obj = NULL; _mutex_cv_lock(mp, recurse); if (!THR_IN_CRITICAL(curthread)) _pthread_exit(PTHREAD_CANCELED); else /* this should not happen */ return (0); } else if (error == ETIMEDOUT) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); break; } } _sleepq_unlock(cvp); curthread->mutex_obj = NULL; _mutex_cv_lock(mp, recurse); return (error); } static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); struct pthread_cond *cvp; struct pthread_mutex *mp; int error; CHECK_AND_INIT_COND if (*mutex == THR_PSHARED_PTR) { mp = __thr_pshared_offpage(mutex, 0); if (mp == NULL) return (EINVAL); } else { mp = *mutex; } if ((error = _mutex_owned(curthread, mp)) != 0) return (error); if (curthread->attr.sched_policy != SCHED_OTHER || (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT| USYNC_PROCESS_SHARED)) != 0 || (cvp->__flags & USYNC_PROCESS_SHARED) != 0) return cond_wait_kernel(cvp, mp, abstime, cancel); else return cond_wait_user(cvp, mp, abstime, cancel); } int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { return (cond_wait_common(cond, mutex, NULL, 0)); } int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { return (cond_wait_common(cond, mutex, NULL, 1)); } int _pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec * abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 0)); } int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 1)); } static int cond_signal_common(pthread_cond_t *cond) { struct pthread *curthread = _get_curthread(); struct pthread *td; struct pthread_cond *cvp; struct pthread_mutex *mp; struct sleepqueue *sq; int *waddr; int pshared; /* * If the condition variable is statically initialized, perform dynamic * initialization. 
*/ CHECK_AND_INIT_COND pshared = CV_PSHARED(cvp); _thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters); if (pshared || cvp->__has_user_waiters == 0) return (0); curthread = _get_curthread(); waddr = NULL; _sleepq_lock(cvp); sq = _sleepq_lookup(cvp); if (sq == NULL) { _sleepq_unlock(cvp); return (0); } td = _sleepq_first(sq); mp = td->mutex_obj; cvp->__has_user_waiters = _sleepq_remove(sq, td); if (mp->m_owner == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = &td->wake_addr->value; mp->m_flags |= PMUTEX_FLAG_DEFERED; } else { waddr = &td->wake_addr->value; } _sleepq_unlock(cvp); if (waddr != NULL) _thr_set_wake(waddr); return (0); } struct broadcast_arg { struct pthread *curthread; unsigned int *waddrs[MAX_DEFER_WAITERS]; int count; }; static void drop_cb(struct pthread *td, void *arg) { struct broadcast_arg *ba = arg; struct pthread_mutex *mp; struct pthread *curthread = ba->curthread; mp = td->mutex_obj; if (mp->m_owner == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = &td->wake_addr->value; mp->m_flags |= PMUTEX_FLAG_DEFERED; } else { if (ba->count >= MAX_DEFER_WAITERS) { _thr_wake_all(ba->waddrs, ba->count); ba->count = 0; } ba->waddrs[ba->count++] = &td->wake_addr->value; } } static int cond_broadcast_common(pthread_cond_t *cond) { int pshared; struct pthread_cond *cvp; struct sleepqueue *sq; struct broadcast_arg ba; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ CHECK_AND_INIT_COND pshared = CV_PSHARED(cvp); _thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters); if (pshared || cvp->__has_user_waiters == 0) return (0); ba.curthread = _get_curthread(); ba.count = 0; _sleepq_lock(cvp); sq = _sleepq_lookup(cvp); if (sq == NULL) { _sleepq_unlock(cvp); return (0); } _sleepq_drop(sq, drop_cb, &ba); cvp->__has_user_waiters = 0; _sleepq_unlock(cvp); if (ba.count > 0) _thr_wake_all(ba.waddrs, ba.count); return (0); } int _pthread_cond_signal(pthread_cond_t * cond) { return (cond_signal_common(cond)); } int _pthread_cond_broadcast(pthread_cond_t * cond) { return (cond_broadcast_common(cond)); } Index: head/lib/libthr/thread/thr_mutex.c =================================================================== --- head/lib/libthr/thread/thr_mutex.c (revision 297700) +++ head/lib/libthr/thread/thr_mutex.c (revision 297701) @@ -1,1021 +1,1024 @@ /* * Copyright (c) 1995 John Birrell . * Copyright (c) 2006 David Xu . * Copyright (c) 2015 The FreeBSD Foundation * * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include "namespace.h" #include #include #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" +_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE, + "pthread_mutex is too large for off-page"); + /* * For adaptive mutexes, how many times to spin doing trylock2 * before entering the kernel to block */ #define MUTEX_ADAPTIVE_SPINS 2000 /* * Prototypes */ int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr); int __pthread_mutex_trylock(pthread_mutex_t *mutex); int __pthread_mutex_lock(pthread_mutex_t *mutex); int __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime); int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); static int mutex_self_trylock(pthread_mutex_t); static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); static int mutex_unlock_common(struct pthread_mutex *, int, int *); static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, const struct timespec *); __weak_reference(__pthread_mutex_init, pthread_mutex_init); __strong_reference(__pthread_mutex_init, _pthread_mutex_init); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); __strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); 
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); static void mutex_init_link(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) m->m_qe.tqe_prev = NULL; m->m_qe.tqe_next = NULL; m->m_pqe.tqe_prev = NULL; m->m_pqe.tqe_next = NULL; #endif } static void mutex_assert_is_owned(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev == NULL)) { char msg[128]; snprintf(msg, sizeof(msg), "mutex %p own %#x %#x is not on list %p %p", m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); PANIC(msg); } #endif } static void mutex_assert_not_owned(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev != NULL || m->m_qe.tqe_next != NULL)) { char msg[128]; snprintf(msg, sizeof(msg), "mutex %p own %#x %#x is on list %p %p", m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); PANIC(msg); } #endif } static int is_pshared_mutex(struct pthread_mutex *m) { return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0); } static int mutex_check_attr(const struct pthread_mutex_attr *attr) { if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) return (EINVAL); if (attr->m_protocol < PTHREAD_PRIO_NONE || attr->m_protocol > PTHREAD_PRIO_PROTECT) return (EINVAL); return (0); } static void mutex_init_body(struct pthread_mutex *pmutex, const struct pthread_mutex_attr *attr) { pmutex->m_flags = attr->m_type; pmutex->m_owner = 0; pmutex->m_count = 0; pmutex->m_spinloops = 0; pmutex->m_yieldloops = 0; mutex_init_link(pmutex); switch (attr->m_protocol) { case PTHREAD_PRIO_NONE: pmutex->m_lock.m_owner = UMUTEX_UNOWNED; pmutex->m_lock.m_flags = 0; break; case PTHREAD_PRIO_INHERIT: pmutex->m_lock.m_owner = UMUTEX_UNOWNED; pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; break; case PTHREAD_PRIO_PROTECT: pmutex->m_lock.m_owner = UMUTEX_CONTESTED; pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; break; } if (attr->m_pshared == PTHREAD_PROCESS_SHARED) pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED; if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { pmutex->m_spinloops = _thr_spinloops ? 
_thr_spinloops: MUTEX_ADAPTIVE_SPINS; pmutex->m_yieldloops = _thr_yieldloops; } } static int mutex_init(pthread_mutex_t *mutex, const struct pthread_mutex_attr *mutex_attr, void *(calloc_cb)(size_t, size_t)) { const struct pthread_mutex_attr *attr; struct pthread_mutex *pmutex; int error; if (mutex_attr == NULL) { attr = &_pthread_mutexattr_default; } else { attr = mutex_attr; error = mutex_check_attr(attr); if (error != 0) return (error); } if ((pmutex = (pthread_mutex_t) calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) return (ENOMEM); mutex_init_body(pmutex, attr); *mutex = pmutex; return (0); } static int init_static(struct pthread *thread, pthread_mutex_t *mutex) { int ret; THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); if (*mutex == THR_MUTEX_INITIALIZER) ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); else ret = 0; THR_LOCK_RELEASE(thread, &_mutex_static_lock); return (ret); } static void set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) { struct pthread_mutex *m2; m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue); if (m2 != NULL) m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; else m->m_lock.m_ceilings[1] = -1; } static void shared_mutex_init(struct pthread_mutex *pmtx, const struct pthread_mutex_attr *mutex_attr) { static const struct pthread_mutex_attr foobar_mutex_attr = { .m_type = PTHREAD_MUTEX_DEFAULT, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_SHARED }; bool done; /* * Hack to allow multiple pthread_mutex_init() calls on the * same process-shared mutex. We rely on kernel allocating * zeroed offpage for the mutex, i.e. the * PMUTEX_INITSTAGE_ALLOC value must be zero. */ for (done = false; !done;) { switch (pmtx->m_ps) { case PMUTEX_INITSTAGE_DONE: atomic_thread_fence_acq(); done = true; break; case PMUTEX_INITSTAGE_ALLOC: if (atomic_cmpset_int(&pmtx->m_ps, PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) { if (mutex_attr == NULL) mutex_attr = &foobar_mutex_attr; mutex_init_body(pmtx, mutex_attr); atomic_store_rel_int(&pmtx->m_ps, PMUTEX_INITSTAGE_DONE); done = true; } break; case PMUTEX_INITSTAGE_BUSY: _pthread_yield(); break; default: PANIC("corrupted offpage"); break; } } } int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) { struct pthread_mutex *pmtx; int ret; if (mutex_attr != NULL) { ret = mutex_check_attr(*mutex_attr); if (ret != 0) return (ret); } if (mutex_attr == NULL || (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) { return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc)); } pmtx = __thr_pshared_offpage(mutex, 1); if (pmtx == NULL) return (EFAULT); *mutex = THR_PSHARED_PTR; shared_mutex_init(pmtx, *mutex_attr); return (0); } /* This function is used internally by malloc. */ int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)) { static const struct pthread_mutex_attr attr = { .m_type = PTHREAD_MUTEX_NORMAL, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_PRIVATE, }; int ret; ret = mutex_init(mutex, &attr, calloc_cb); if (ret == 0) (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE; return (ret); } /* * Fix mutex ownership for child process. * * Process private mutex ownership is transmitted from the forking * thread to the child process. 
* * Process shared mutex should not be inherited because owner is * forking thread which is in parent process, they are removed from * the owned mutex list. */ static void queue_fork(struct pthread *curthread, struct mutex_queue *q, struct mutex_queue *qp, uint bit) { struct pthread_mutex *m; TAILQ_INIT(q); TAILQ_FOREACH(m, qp, m_pqe) { TAILQ_INSERT_TAIL(q, m, m_qe); m->m_lock.m_owner = TID(curthread) | bit; m->m_owner = TID(curthread); } } void _mutex_fork(struct pthread *curthread) { queue_fork(curthread, &curthread->mq[TMQ_NORM], &curthread->mq[TMQ_NORM_PRIV], 0); queue_fork(curthread, &curthread->mq[TMQ_NORM_PP], &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED); } int _pthread_mutex_destroy(pthread_mutex_t *mutex) { pthread_mutex_t m, m1; int ret; m = *mutex; if (m < THR_MUTEX_DESTROYED) { ret = 0; } else if (m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { if (m == THR_PSHARED_PTR) { m1 = __thr_pshared_offpage(mutex, 0); if (m1 != NULL) { mutex_assert_not_owned(m1); __thr_pshared_destroy(mutex); } *mutex = THR_MUTEX_DESTROYED; return (0); } if (m->m_owner != 0) { ret = EBUSY; } else { *mutex = THR_MUTEX_DESTROYED; mutex_assert_not_owned(m); free(m); ret = 0; } } return (ret); } static int mutex_qidx(struct pthread_mutex *m) { if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (TMQ_NORM); return (TMQ_NORM_PP); } static void enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m) { int qidx; m->m_owner = TID(curthread); /* Add to the list of owned mutexes: */ mutex_assert_not_owned(m); qidx = mutex_qidx(m); TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); } static void dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) { int qidx; m->m_owner = 0; mutex_assert_is_owned(m); qidx = mutex_qidx(m); TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) set_inherited_priority(curthread, m); mutex_init_link(m); } static int check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m) { int ret; *m = *mutex; ret = 0; if (*m == THR_PSHARED_PTR) { *m = __thr_pshared_offpage(mutex, 0); if (*m == NULL) ret = EINVAL; shared_mutex_init(*m, NULL); } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) { if (*m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { ret = init_static(_get_curthread(), mutex); if (ret == 0) *m = *mutex; } } return (ret); } int __pthread_mutex_trylock(pthread_mutex_t *mutex) { struct pthread *curthread; struct pthread_mutex *m; uint32_t id; int ret; ret = check_and_init_mutex(mutex, &m); if (ret != 0) return (ret); curthread = _get_curthread(); id = TID(curthread); if (m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); ret = _thr_umutex_trylock(&m->m_lock, id); if (__predict_true(ret == 0)) { enqueue_mutex(curthread, m); } else if (m->m_owner == id) { ret = mutex_self_trylock(m); } /* else {} */ if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE)) THR_CRITICAL_LEAVE(curthread); return (ret); } static int mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, const struct timespec *abstime) { uint32_t id, owner; int count; int ret; id = TID(curthread); if (m->m_owner == id) return (mutex_self_lock(m, abstime)); /* * For adaptive mutexes, spin for a bit in the expectation * that if the application requests this mutex type then * the lock is likely to be released quickly and it is * faster than entering the kernel 
*/ if (__predict_false( (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) goto sleep_in_kernel; if (!_thr_is_smp) goto yield_loop; count = m->m_spinloops; while (count--) { owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { ret = 0; goto done; } } CPU_SPINWAIT; } yield_loop: count = m->m_yieldloops; while (count--) { _sched_yield(); owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { ret = 0; goto done; } } } sleep_in_kernel: if (abstime == NULL) { ret = __thr_umutex_lock(&m->m_lock, id); } else if (__predict_false( abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)) { ret = EINVAL; } else { ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); } done: if (ret == 0) enqueue_mutex(curthread, m); return (ret); } static inline int mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, int cvattach) { struct pthread *curthread = _get_curthread(); int ret; if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { enqueue_mutex(curthread, m); ret = 0; } else { ret = mutex_lock_sleep(curthread, m, abstime); } if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach) THR_CRITICAL_LEAVE(curthread); return (ret); } int __pthread_mutex_lock(pthread_mutex_t *mutex) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, NULL, 0); return (ret); } int __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, abstime, 0); return (ret); } int _pthread_mutex_unlock(pthread_mutex_t *mutex) { struct pthread_mutex *mp; if (*mutex == THR_PSHARED_PTR) { mp = __thr_pshared_offpage(mutex, 0); if (mp == NULL) return (EINVAL); shared_mutex_init(mp, NULL); } else { mp = *mutex; } return (mutex_unlock_common(mp, 0, NULL)); } int _mutex_cv_lock(struct pthread_mutex *m, int count) { int error; error = mutex_lock_common(m, NULL, 1); if (error == 0) m->m_count = count; return (error); } int _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) { /* * Clear the count in case this is a recursive mutex. */ *count = m->m_count; m->m_count = 0; (void)mutex_unlock_common(m, 1, defer); return (0); } int _mutex_cv_attach(struct pthread_mutex *m, int count) { struct pthread *curthread = _get_curthread(); enqueue_mutex(curthread, m); m->m_count = count; return (0); } int _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) { struct pthread *curthread = _get_curthread(); int defered; int error; if ((error = _mutex_owned(curthread, mp)) != 0) return (error); /* * Clear the count in case this is a recursive mutex. */ *recurse = mp->m_count; mp->m_count = 0; dequeue_mutex(curthread, mp); /* Will this happen in real-world ? 
*/ if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) { defered = 1; mp->m_flags &= ~PMUTEX_FLAG_DEFERED; } else defered = 0; if (defered) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } return (0); } static int mutex_self_trylock(struct pthread_mutex *m) { int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: case PTHREAD_MUTEX_ADAPTIVE_NP: ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return (ret); } static int mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) { struct timespec ts1, ts2; int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_ADAPTIVE_NP: if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { /* * POSIX specifies that mutexes should return * EDEADLK if a recursive lock is detected. */ ret = EDEADLK; } break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ ret = 0; if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { ts1.tv_sec = 30; ts1.tv_nsec = 0; for (;;) __sys_nanosleep(&ts1, NULL); } break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return (ret); } static int mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) { struct pthread *curthread = _get_curthread(); uint32_t id; int defered, error; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { if (m == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } id = TID(curthread); /* * Check if the running thread is not the owner of the mutex. 
*/ if (__predict_false(m->m_owner != id)) return (EPERM); error = 0; if (__predict_false( PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { m->m_count--; } else { if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) { defered = 1; m->m_flags &= ~PMUTEX_FLAG_DEFERED; } else defered = 0; dequeue_mutex(curthread, m); error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); if (mtx_defer == NULL && defered) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } } if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_LEAVE(curthread); return (error); } int _pthread_mutex_getprioceiling(pthread_mutex_t *mutex, int *prioceiling) { struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); *prioceiling = m->m_lock.m_ceilings[0]; return (0); } int _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, int ceiling, int *old_ceiling) { struct pthread *curthread; struct pthread_mutex *m, *m1, *m2; struct mutex_queue *q, *qp; int ret; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); if (ret != 0) return (ret); curthread = _get_curthread(); if (m->m_owner == TID(curthread)) { mutex_assert_is_owned(m); m1 = TAILQ_PREV(m, mutex_queue, m_qe); m2 = TAILQ_NEXT(m, m_qe); if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { q = &curthread->mq[TMQ_NORM_PP]; qp = &curthread->mq[TMQ_NORM_PP_PRIV]; TAILQ_REMOVE(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(qp, m, m_pqe); TAILQ_FOREACH(m2, q, m_qe) { if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { TAILQ_INSERT_BEFORE(m2, m, m_qe); if (!is_pshared_mutex(m)) { while (m2 != NULL && is_pshared_mutex(m2)) { m2 = TAILQ_PREV(m2, mutex_queue, m_qe); } if (m2 == NULL) { TAILQ_INSERT_HEAD(qp, m, m_pqe); } else { TAILQ_INSERT_BEFORE(m2, m, m_pqe); } } return (0); } } TAILQ_INSERT_TAIL(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(qp, m, m_pqe); } } return (0); } int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_spinloops; return (ret); } int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_spinloops = count; return (ret); } int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_yieldloops; return (ret); } int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_yieldloops = count; return (0); } int _pthread_mutex_isowned_np(pthread_mutex_t *mutex) { struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (0); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (0); 
} return (m->m_owner == TID(_get_curthread())); } int _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) { if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { if (mp == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } if (mp->m_owner != TID(curthread)) return (EPERM); return (0); } Index: head/lib/libthr/thread/thr_pspinlock.c =================================================================== --- head/lib/libthr/thread/thr_pspinlock.c (revision 297700) +++ head/lib/libthr/thread/thr_pspinlock.c (revision 297701) @@ -1,149 +1,152 @@ /*- * Copyright (c) 2003 David Xu * Copyright (c) 2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "namespace.h" #include #include #include #include "un-namespace.h" #include "thr_private.h" +_Static_assert(sizeof(struct pthread_spinlock) <= PAGE_SIZE, + "pthread_spinlock is too large for off-page"); + #define SPIN_COUNT 100000 __weak_reference(_pthread_spin_init, pthread_spin_init); __weak_reference(_pthread_spin_destroy, pthread_spin_destroy); __weak_reference(_pthread_spin_trylock, pthread_spin_trylock); __weak_reference(_pthread_spin_lock, pthread_spin_lock); __weak_reference(_pthread_spin_unlock, pthread_spin_unlock); int _pthread_spin_init(pthread_spinlock_t *lock, int pshared) { struct pthread_spinlock *lck; if (lock == NULL) return (EINVAL); if (pshared == PTHREAD_PROCESS_PRIVATE) { lck = malloc(sizeof(struct pthread_spinlock)); if (lck == NULL) return (ENOMEM); *lock = lck; } else if (pshared == PTHREAD_PROCESS_SHARED) { lck = __thr_pshared_offpage(lock, 1); if (lck == NULL) return (EFAULT); *lock = THR_PSHARED_PTR; } else { return (EINVAL); } _thr_umutex_init(&lck->s_lock); return (0); } int _pthread_spin_destroy(pthread_spinlock_t *lock) { void *l; int ret; if (lock == NULL || *lock == NULL) { ret = EINVAL; } else if (*lock == THR_PSHARED_PTR) { l = __thr_pshared_offpage(lock, 0); if (l != NULL) __thr_pshared_destroy(l); ret = 0; } else { free(*lock); *lock = NULL; ret = 0; } return (ret); } int _pthread_spin_trylock(pthread_spinlock_t *lock) { struct pthread_spinlock *lck; if (lock == NULL || *lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); return (THR_UMUTEX_TRYLOCK(_get_curthread(), &lck->s_lock)); } int _pthread_spin_lock(pthread_spinlock_t *lock) { struct pthread *curthread; struct pthread_spinlock *lck; int count; if (lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); curthread = _get_curthread(); count = SPIN_COUNT; while (THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock) != 0) { while (lck->s_lock.m_owner) { if (!_thr_is_smp) { _pthread_yield(); } else { CPU_SPINWAIT; if (--count <= 0) { count = SPIN_COUNT; _pthread_yield(); } } } } return (0); } int _pthread_spin_unlock(pthread_spinlock_t *lock) { struct pthread_spinlock *lck; if (lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); return (THR_UMUTEX_UNLOCK(_get_curthread(), &lck->s_lock)); } Index: head/lib/libthr/thread/thr_rwlock.c =================================================================== --- head/lib/libthr/thread/thr_rwlock.c (revision 297700) +++ head/lib/libthr/thread/thr_rwlock.c (revision 297701) @@ -1,348 +1,351 @@ /*- * Copyright (c) 1998 Alex Nash * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include "namespace.h" #include #include "un-namespace.h" #include "thr_private.h" +_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE, + "pthread_rwlock is too large for off-page"); + __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy); __weak_reference(_pthread_rwlock_init, pthread_rwlock_init); __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock); __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock); __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock); __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock); __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock); __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock); __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); #define CHECK_AND_INIT_RWLOCK \ if (*rwlock == THR_PSHARED_PTR) { \ prwlock = __thr_pshared_offpage(rwlock, 0); \ if (prwlock == NULL) \ return (EINVAL); \ } else if (__predict_false((prwlock = (*rwlock)) <= \ THR_RWLOCK_DESTROYED)) { \ if (prwlock == THR_RWLOCK_INITIALIZER) { \ int ret; \ ret = init_static(_get_curthread(), rwlock); \ if (ret) \ return (ret); \ } else if (prwlock == THR_RWLOCK_DESTROYED) { \ return (EINVAL); \ } \ prwlock = *rwlock; \ } /* * Prototypes */ static int rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { pthread_rwlock_t prwlock; if (attr == NULL || *attr == NULL || (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) { prwlock = calloc(1, sizeof(struct pthread_rwlock)); if (prwlock == NULL) return (ENOMEM); *rwlock = prwlock; } else { prwlock = __thr_pshared_offpage(rwlock, 1); if (prwlock == NULL) return (EFAULT); prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED; *rwlock = THR_PSHARED_PTR; } return (0); } int _pthread_rwlock_destroy (pthread_rwlock_t *rwlock) { pthread_rwlock_t prwlock; int ret; prwlock = *rwlock; if (prwlock == THR_RWLOCK_INITIALIZER) ret = 0; else if (prwlock == THR_RWLOCK_DESTROYED) ret = EINVAL; else if (prwlock == THR_PSHARED_PTR) { *rwlock = THR_RWLOCK_DESTROYED; __thr_pshared_destroy(rwlock); ret = 0; } else { *rwlock = THR_RWLOCK_DESTROYED; free(prwlock); ret = 0; } return (ret); } static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock) { int ret; THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); if (*rwlock == THR_RWLOCK_INITIALIZER) ret = rwlock_init(rwlock, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_rwlock_static_lock); return (ret); } int _pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { *rwlock = NULL; return (rwlock_init(rwlock, attr)); } static int rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int flags; int ret; CHECK_AND_INIT_RWLOCK if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by * a thread or all of the threads that hold a rdlock, * we keep a simple count of all the rdlocks 
held by * a thread. If a thread holds any rdlocks it is * possible that it is attempting to take a recursive * rdlock. If there are blocked writers and precedence * is given to them, then that would result in the thread * deadlocking. So allowing a thread to take the rdlock * when it already has one or more rdlocks avoids the * deadlock. I hope the reader can follow that logic ;-) */ flags = URWLOCK_PREFER_READER; } else { flags = 0; } /* * POSIX said the validity of the abstimeout parameter need * not be checked if the lock can be immediately acquired. */ ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); if (ret == 0) { curthread->rdlock_count++; return (ret); } if (__predict_false(abstime && (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) return (EINVAL); for (;;) { /* goto kernel and lock it */ ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime); if (ret != EINTR) break; /* if interrupted, try to lock it in userland again. */ if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) { ret = 0; break; } } if (ret == 0) curthread->rdlock_count++; return (ret); } int _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { return (rwlock_rdlock_common(rwlock, NULL)); } int _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { return (rwlock_rdlock_common(rwlock, abstime)); } int _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int flags; int ret; CHECK_AND_INIT_RWLOCK if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by * a thread or all of the threads that hold a rdlock, * we keep a simple count of all the rdlocks held by * a thread. If a thread holds any rdlocks it is * possible that it is attempting to take a recursive * rdlock. If there are blocked writers and precedence * is given to them, then that would result in the thread * deadlocking. So allowing a thread to take the rdlock * when it already has one or more rdlocks avoids the * deadlock. I hope the reader can follow that logic ;-) */ flags = URWLOCK_PREFER_READER; } else { flags = 0; } ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); if (ret == 0) curthread->rdlock_count++; return (ret); } int _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; CHECK_AND_INIT_RWLOCK ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) prwlock->owner = TID(curthread); return (ret); } static int rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; CHECK_AND_INIT_RWLOCK /* * POSIX said the validity of the abstimeout parameter need * not be checked if the lock can be immediately acquired. */ ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) { prwlock->owner = TID(curthread); return (ret); } if (__predict_false(abstime && (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) return (EINVAL); for (;;) { /* goto kernel and lock it */ ret = __thr_rwlock_wrlock(&prwlock->lock, abstime); if (ret == 0) { prwlock->owner = TID(curthread); break; } if (ret != EINTR) break; /* if interrupted, try to lock it in userland again. 
*/ if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) { ret = 0; prwlock->owner = TID(curthread); break; } } return (ret); } int _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) { return (rwlock_wrlock_common (rwlock, NULL)); } int _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { return (rwlock_wrlock_common (rwlock, abstime)); } int _pthread_rwlock_unlock(pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; int32_t state; if (*rwlock == THR_PSHARED_PTR) { prwlock = __thr_pshared_offpage(rwlock, 0); if (prwlock == NULL) return (EINVAL); } else { prwlock = *rwlock; } if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED)) return (EINVAL); state = prwlock->lock.rw_state; if (state & URWLOCK_WRITE_OWNER) { if (__predict_false(prwlock->owner != TID(curthread))) return (EPERM); prwlock->owner = 0; } ret = _thr_rwlock_unlock(&prwlock->lock); if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0) curthread->rdlock_count--; return (ret); }
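
All five files in this revision gain the same compile-time guard: any synchronization object that can be placed on a process-shared "off-page" obtained from __thr_pshared_offpage() must fit within a single page, and the new _Static_assert lines make the build fail if one of the private structures ever grows past PAGE_SIZE. A minimal sketch of the pattern, using a hypothetical struct my_pshared_obj rather than libthr's private types (on FreeBSD, PAGE_SIZE is provided via <sys/param.h>/<machine/param.h>):

#include <sys/param.h>		/* PAGE_SIZE on FreeBSD */
#include <stdint.h>

/* Hypothetical stand-in for one of libthr's private structures. */
struct my_pshared_obj {
	uint32_t	lock_word;
	uint32_t	flags;
	char		payload[128];
};

/* Fails the build if the object ever outgrows its one-page off-page backing. */
_Static_assert(sizeof(struct my_pshared_obj) <= PAGE_SIZE,
    "my_pshared_obj is too large for off-page");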
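
The off-page machinery that these asserts protect serves PTHREAD_PROCESS_SHARED objects: the user-visible pthread_mutex_t (or cond/barrier/rwlock/spinlock handle) sits in memory mapped into several processes and holds only THR_PSHARED_PTR, while the real structure is looked up by the handle's address. A generic POSIX usage sketch, not taken from the diff, showing the consumer-side setup that exercises this path (build with the threads library, e.g. cc -lpthread):

#include <sys/mman.h>
#include <sys/wait.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *mp;
	pid_t pid;

	/* Shared anonymous memory holds the user-visible handle. */
	mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	if (mp == MAP_FAILED)
		err(1, "mmap");

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	if (pthread_mutex_init(mp, &attr) != 0)
		errx(1, "pthread_mutex_init");
	pthread_mutexattr_destroy(&attr);

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		pthread_mutex_lock(mp);
		printf("child %d holds the shared mutex\n", (int)getpid());
		pthread_mutex_unlock(mp);
		_exit(0);
	}
	pthread_mutex_lock(mp);
	printf("parent %d holds the shared mutex\n", (int)getpid());
	pthread_mutex_unlock(mp);
	waitpid(pid, NULL, 0);

	pthread_mutex_destroy(mp);
	munmap(mp, sizeof(*mp));
	return (0);
}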
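
_pthread_barrier_wait() in thr_barrier.c releases waiters with a generation counter: the last arriving thread zeroes b_waiters, increments b_cycle and broadcasts, while every other thread re-checks the cycle after each wakeup so a stale or spurious wakeup cannot release it early. The same technique expressed over the public mutex/condvar API, with illustrative names (gen_barrier is not a libthr type; returning 1 from the last thread mirrors PTHREAD_BARRIER_SERIAL_THREAD):

#include <pthread.h>

struct gen_barrier {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	unsigned	count;		/* threads per cycle */
	unsigned	waiting;	/* arrived so far in this cycle */
	unsigned long	cycle;		/* generation counter */
};

int
gen_barrier_wait(struct gen_barrier *b)
{
	unsigned long my_cycle;
	int last;

	pthread_mutex_lock(&b->lock);
	my_cycle = b->cycle;
	if (++b->waiting == b->count) {
		/* Last arrival: start a new generation and release everyone. */
		b->waiting = 0;
		b->cycle++;
		pthread_cond_broadcast(&b->cv);
		last = 1;
	} else {
		/* Re-check the generation to tolerate spurious wakeups. */
		while (my_cycle == b->cycle)
			pthread_cond_wait(&b->cv, &b->lock);
		last = 0;
	}
	pthread_mutex_unlock(&b->lock);
	return (last);
}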