Index: head/lib/libthr/thread/thr_mutex.c
===================================================================
--- head/lib/libthr/thread/thr_mutex.c	(revision 297184)
+++ head/lib/libthr/thread/thr_mutex.c	(revision 297185)
@@ -1,970 +1,1021 @@
/*
 * Copyright (c) 1995 John Birrell.
 * Copyright (c) 2006 David Xu.
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * $FreeBSD$ */ +#include #include "namespace.h" #include #include #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" /* * For adaptive mutexes, how many times to spin doing trylock2 * before entering the kernel to block */ #define MUTEX_ADAPTIVE_SPINS 2000 /* * Prototypes */ int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr); int __pthread_mutex_trylock(pthread_mutex_t *mutex); int __pthread_mutex_lock(pthread_mutex_t *mutex); int __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime); int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); static int mutex_self_trylock(pthread_mutex_t); static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); static int mutex_unlock_common(struct pthread_mutex *, int, int *); static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, const struct timespec *); __weak_reference(__pthread_mutex_init, pthread_mutex_init); __strong_reference(__pthread_mutex_init, _pthread_mutex_init); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); __strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); static void mutex_init_link(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) m->m_qe.tqe_prev = NULL; m->m_qe.tqe_next = NULL; m->m_pqe.tqe_prev = NULL; m->m_pqe.tqe_next = NULL; #endif } static void mutex_assert_is_owned(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev == NULL)) { char msg[128]; snprintf(msg, sizeof(msg), "mutex %p own %#x %#x is not on list %p %p", m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); PANIC(msg); } #endif } static void mutex_assert_not_owned(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) if 
(__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL)) {
		char msg[128];
		snprintf(msg, sizeof(msg),
		    "mutex %p own %#x %#x is on list %p %p",
		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
		    m->m_qe.tqe_next);
		PANIC(msg);
	}
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = 0;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
}

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

+static void
+shared_mutex_init(struct pthread_mutex *pmtx, const struct
+    pthread_mutex_attr *mutex_attr)
+{
+	static const struct pthread_mutex_attr foobar_mutex_attr = {
+		.m_type = PTHREAD_MUTEX_DEFAULT,
+		.m_protocol = PTHREAD_PRIO_NONE,
+		.m_ceiling = 0,
+		.m_pshared = PTHREAD_PROCESS_SHARED
+	};
+	bool done;
+
+	/*
+	 * Hack to allow multiple pthread_mutex_init() calls on the
+	 * same process-shared mutex.  We rely on kernel allocating
+	 * zeroed offpage for the mutex, i.e. the
+	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
+	 */
+	for (done = false; !done;) {
+		switch (pmtx->m_ps) {
+		case PMUTEX_INITSTAGE_DONE:
+			atomic_thread_fence_acq();
+			done = true;
+			break;
+		case PMUTEX_INITSTAGE_ALLOC:
+			if (atomic_cmpset_int(&pmtx->m_ps,
+			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
+				if (mutex_attr == NULL)
+					mutex_attr = &foobar_mutex_attr;
+				mutex_init_body(pmtx, mutex_attr);
+				atomic_store_rel_int(&pmtx->m_ps,
+				    PMUTEX_INITSTAGE_DONE);
+				done = true;
+			}
+			break;
+		case PMUTEX_INITSTAGE_BUSY:
+			_pthread_yield();
+			break;
+		default:
+			PANIC("corrupted offpage");
+			break;
+		}
+	}
+}
+
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmtx;
	int ret;

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		    calloc));
	}
	pmtx = __thr_pshared_offpage(mutex, 1);
	if (pmtx == NULL)
		return (EFAULT);
	*mutex = THR_PSHARED_PTR;
-	mutex_init_body(pmtx, *mutex_attr);
+	shared_mutex_init(pmtx, *mutex_attr);
	return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}

/*
 * Fix mutex ownership for child process.
 *
 * Process private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process shared mutex should not be inherited because owner is
 * forking thread which is in parent process, they are removed from
 * the owned mutex list.
*/ static void queue_fork(struct pthread *curthread, struct mutex_queue *q, struct mutex_queue *qp, uint bit) { struct pthread_mutex *m; TAILQ_INIT(q); TAILQ_FOREACH(m, qp, m_pqe) { TAILQ_INSERT_TAIL(q, m, m_qe); m->m_lock.m_owner = TID(curthread) | bit; m->m_owner = TID(curthread); } } void _mutex_fork(struct pthread *curthread) { queue_fork(curthread, &curthread->mq[TMQ_NORM], &curthread->mq[TMQ_NORM_PRIV], 0); queue_fork(curthread, &curthread->mq[TMQ_NORM_PP], &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED); } int _pthread_mutex_destroy(pthread_mutex_t *mutex) { pthread_mutex_t m, m1; int ret; m = *mutex; if (m < THR_MUTEX_DESTROYED) { ret = 0; } else if (m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { if (m == THR_PSHARED_PTR) { m1 = __thr_pshared_offpage(mutex, 0); if (m1 != NULL) { mutex_assert_not_owned(m1); __thr_pshared_destroy(mutex); } *mutex = THR_MUTEX_DESTROYED; return (0); } if (m->m_owner != 0) { ret = EBUSY; } else { *mutex = THR_MUTEX_DESTROYED; mutex_assert_not_owned(m); free(m); ret = 0; } } return (ret); } static int mutex_qidx(struct pthread_mutex *m) { if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (TMQ_NORM); return (TMQ_NORM_PP); } static void enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m) { int qidx; m->m_owner = TID(curthread); /* Add to the list of owned mutexes: */ mutex_assert_not_owned(m); qidx = mutex_qidx(m); TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); } static void dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) { int qidx; m->m_owner = 0; mutex_assert_is_owned(m); qidx = mutex_qidx(m); TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) set_inherited_priority(curthread, m); mutex_init_link(m); } static int check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m) { int ret; *m = *mutex; ret = 0; if (*m == THR_PSHARED_PTR) { *m = __thr_pshared_offpage(mutex, 0); if (*m == NULL) ret = EINVAL; + shared_mutex_init(*m, NULL); } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) { if (*m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { ret = init_static(_get_curthread(), mutex); if (ret == 0) *m = *mutex; } } return (ret); } int __pthread_mutex_trylock(pthread_mutex_t *mutex) { struct pthread *curthread; struct pthread_mutex *m; uint32_t id; int ret; ret = check_and_init_mutex(mutex, &m); if (ret != 0) return (ret); curthread = _get_curthread(); id = TID(curthread); if (m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); ret = _thr_umutex_trylock(&m->m_lock, id); if (__predict_true(ret == 0)) { enqueue_mutex(curthread, m); } else if (m->m_owner == id) { ret = mutex_self_trylock(m); } /* else {} */ if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE)) THR_CRITICAL_LEAVE(curthread); return (ret); } static int mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, const struct timespec *abstime) { uint32_t id, owner; int count; int ret; id = TID(curthread); if (m->m_owner == id) return (mutex_self_lock(m, abstime)); /* * For adaptive mutexes, spin for a bit in the expectation * that if the application requests this mutex type then * the lock is likely to be released quickly and it is * faster than entering the kernel */ if (__predict_false( (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) goto sleep_in_kernel; if (!_thr_is_smp) goto yield_loop; 
count = m->m_spinloops; while (count--) { owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { ret = 0; goto done; } } CPU_SPINWAIT; } yield_loop: count = m->m_yieldloops; while (count--) { _sched_yield(); owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { ret = 0; goto done; } } } sleep_in_kernel: if (abstime == NULL) { ret = __thr_umutex_lock(&m->m_lock, id); } else if (__predict_false( abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)) { ret = EINVAL; } else { ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); } done: if (ret == 0) enqueue_mutex(curthread, m); return (ret); } static inline int mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, int cvattach) { struct pthread *curthread = _get_curthread(); int ret; if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { enqueue_mutex(curthread, m); ret = 0; } else { ret = mutex_lock_sleep(curthread, m, abstime); } if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach) THR_CRITICAL_LEAVE(curthread); return (ret); } int __pthread_mutex_lock(pthread_mutex_t *mutex) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, NULL, 0); return (ret); } int __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, abstime, 0); return (ret); } int _pthread_mutex_unlock(pthread_mutex_t *mutex) { struct pthread_mutex *mp; if (*mutex == THR_PSHARED_PTR) { mp = __thr_pshared_offpage(mutex, 0); if (mp == NULL) return (EINVAL); + shared_mutex_init(mp, NULL); } else { mp = *mutex; } return (mutex_unlock_common(mp, 0, NULL)); } int _mutex_cv_lock(struct pthread_mutex *m, int count) { int error; error = mutex_lock_common(m, NULL, 1); if (error == 0) m->m_count = count; return (error); } int _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) { /* * Clear the count in case this is a recursive mutex. */ *count = m->m_count; m->m_count = 0; (void)mutex_unlock_common(m, 1, defer); return (0); } int _mutex_cv_attach(struct pthread_mutex *m, int count) { struct pthread *curthread = _get_curthread(); enqueue_mutex(curthread, m); m->m_count = count; return (0); } int _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) { struct pthread *curthread = _get_curthread(); int defered; int error; if ((error = _mutex_owned(curthread, mp)) != 0) return (error); /* * Clear the count in case this is a recursive mutex. */ *recurse = mp->m_count; mp->m_count = 0; dequeue_mutex(curthread, mp); /* Will this happen in real-world ? 
*/ if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) { defered = 1; mp->m_flags &= ~PMUTEX_FLAG_DEFERED; } else defered = 0; if (defered) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } return (0); } static int mutex_self_trylock(struct pthread_mutex *m) { int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: case PTHREAD_MUTEX_ADAPTIVE_NP: ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return (ret); } static int mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) { struct timespec ts1, ts2; int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_ADAPTIVE_NP: if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { /* * POSIX specifies that mutexes should return * EDEADLK if a recursive lock is detected. */ ret = EDEADLK; } break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ ret = 0; if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { ts1.tv_sec = 30; ts1.tv_nsec = 0; for (;;) __sys_nanosleep(&ts1, NULL); } break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return (ret); } static int mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) { struct pthread *curthread = _get_curthread(); uint32_t id; int defered, error; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { if (m == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } id = TID(curthread); /* * Check if the running thread is not the owner of the mutex. 
*/ if (__predict_false(m->m_owner != id)) return (EPERM); error = 0; if (__predict_false( PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { m->m_count--; } else { if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) { defered = 1; m->m_flags &= ~PMUTEX_FLAG_DEFERED; } else defered = 0; dequeue_mutex(curthread, m); error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); if (mtx_defer == NULL && defered) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } } if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_LEAVE(curthread); return (error); } int _pthread_mutex_getprioceiling(pthread_mutex_t *mutex, int *prioceiling) { struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); + shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); *prioceiling = m->m_lock.m_ceilings[0]; return (0); } int _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, int ceiling, int *old_ceiling) { struct pthread *curthread; struct pthread_mutex *m, *m1, *m2; struct mutex_queue *q, *qp; int ret; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); + shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); if (ret != 0) return (ret); curthread = _get_curthread(); if (m->m_owner == TID(curthread)) { mutex_assert_is_owned(m); m1 = TAILQ_PREV(m, mutex_queue, m_qe); m2 = TAILQ_NEXT(m, m_qe); if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { q = &curthread->mq[TMQ_NORM_PP]; qp = &curthread->mq[TMQ_NORM_PP_PRIV]; TAILQ_REMOVE(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(qp, m, m_pqe); TAILQ_FOREACH(m2, q, m_qe) { if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { TAILQ_INSERT_BEFORE(m2, m, m_qe); if (!is_pshared_mutex(m)) { while (m2 != NULL && is_pshared_mutex(m2)) { m2 = TAILQ_PREV(m2, mutex_queue, m_qe); } if (m2 == NULL) { TAILQ_INSERT_HEAD(qp, m, m_pqe); } else { TAILQ_INSERT_BEFORE(m2, m, m_pqe); } } return (0); } } TAILQ_INSERT_TAIL(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(qp, m, m_pqe); } } return (0); } int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_spinloops; return (ret); } int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_spinloops = count; return (ret); } int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_yieldloops; return (ret); } int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_yieldloops = count; return (0); } int _pthread_mutex_isowned_np(pthread_mutex_t *mutex) { - struct pthread_mutex *m; + struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (0); + shared_mutex_init(m, NULL); } else { m = *mutex; if (m 
<= THR_MUTEX_DESTROYED) return (0); } return (m->m_owner == TID(_get_curthread())); } int _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) { if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { if (mp == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } if (mp->m_owner != TID(curthread)) return (EPERM); return (0); } Index: head/lib/libthr/thread/thr_private.h =================================================================== --- head/lib/libthr/thread/thr_private.h (revision 297184) +++ head/lib/libthr/thread/thr_private.h (revision 297185) @@ -1,960 +1,968 @@ /* * Copyright (C) 2005 Daniel M. Eischen * Copyright (c) 2005 David Xu * Copyright (c) 1995-1998 John Birrell . * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SYM_FB10(sym) __CONCAT(sym, _fb10) #define SYM_FBP10(sym) __CONCAT(sym, _fbp10) #define WEAK_REF(sym, alias) __weak_reference(sym, alias) #define SYM_COMPAT(sym, impl, ver) __sym_compat(sym, impl, ver) #define SYM_DEFAULT(sym, impl, ver) __sym_default(sym, impl, ver) #define FB10_COMPAT(func, sym) \ WEAK_REF(func, SYM_FB10(sym)); \ SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0) #define FB10_COMPAT_PRIVATE(func, sym) \ WEAK_REF(func, SYM_FBP10(sym)); \ SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0) #include "pthread_md.h" #include "thr_umtx.h" #include "thread_db.h" #ifdef _PTHREAD_FORCED_UNWIND #define _BSD_SOURCE #include #endif typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist; typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head; TAILQ_HEAD(mutex_queue, pthread_mutex); /* Signal to do cancellation */ #define SIGCANCEL SIGTHR /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) 
_thread_printf(STDERR_FILENO, ##args) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (__predict_false(!(cond))) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif #ifdef PIC # define STATIC_LIB_REQUIRE(name) #else # define STATIC_LIB_REQUIRE(name) __asm (".globl " #name) #endif #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* Magic cookie set for shared pthread locks and cv's pointers */ #define THR_PSHARED_PTR \ ((void *)(uintptr_t)((1ULL << (NBBY * sizeof(long) - 1)) | 1)) /* XXX These values should be same as those defined in pthread.h */ #define THR_MUTEX_INITIALIZER ((struct pthread_mutex *)NULL) #define THR_ADAPTIVE_MUTEX_INITIALIZER ((struct pthread_mutex *)1) #define THR_MUTEX_DESTROYED ((struct pthread_mutex *)2) #define THR_COND_INITIALIZER ((struct pthread_cond *)NULL) #define THR_COND_DESTROYED ((struct pthread_cond *)1) #define THR_RWLOCK_INITIALIZER ((struct pthread_rwlock *)NULL) #define THR_RWLOCK_DESTROYED ((struct pthread_rwlock *)1) #define PMUTEX_FLAG_TYPE_MASK 0x0ff #define PMUTEX_FLAG_PRIVATE 0x100 #define PMUTEX_FLAG_DEFERED 0x200 #define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK) #define MAX_DEFER_WAITERS 50 +/* + * Values for pthread_mutex m_ps indicator. + */ +#define PMUTEX_INITSTAGE_ALLOC 0 +#define PMUTEX_INITSTAGE_BUSY 1 +#define PMUTEX_INITSTAGE_DONE 2 + struct pthread_mutex { /* * Lock for accesses to this structure. */ struct umutex m_lock; int m_flags; uint32_t m_owner; int m_count; int m_spinloops; int m_yieldloops; + int m_ps; /* pshared init stage */ /* * Link for all mutexes a thread currently owns, of the same * prio type. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* Link for all private mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_pqe; }; struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; int m_pshared; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } struct pthread_cond { __uint32_t __has_user_waiters; __uint32_t __has_kern_waiters; __uint32_t __flags; __uint32_t __clock_id; }; struct pthread_cond_attr { int c_pshared; int c_clockid; }; struct pthread_barrier { struct umutex b_lock; struct ucond b_cv; int64_t b_cycle; int b_count; int b_waiters; int b_refcount; int b_destroying; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { struct umutex s_lock; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *prev; void (*routine)(void *); void *routine_arg; int onheap; }; #define THR_CLEANUP_PUSH(td, func, arg) { \ struct pthread_cleanup __cup; \ \ __cup.routine = func; \ __cup.routine_arg = arg; \ __cup.onheap = 0; \ __cup.prev = (td)->cleanup; \ (td)->cleanup = &__cup; #define THR_CLEANUP_POP(td, exec) \ (td)->cleanup = __cup.prev; \ if ((exec) != 0) \ __cup.routine(__cup.routine_arg); \ } struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); void (*parent)(void); void (*child)(void); }; struct pthread_attr { #define pthread_attr_start_copy sched_policy int sched_policy; int sched_inherit; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; #define pthread_attr_end_copy cpuset cpuset_t *cpuset; size_t cpusetsize; }; struct wake_addr { struct wake_addr *link; unsigned int value; char pad[12]; }; struct sleepqueue { TAILQ_HEAD(, pthread) sq_blocked; SLIST_HEAD(, sleepqueue) sq_freeq; LIST_ENTRY(sleepqueue) sq_hash; SLIST_ENTRY(sleepqueue) sq_flink; void *sq_wchan; int sq_type; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK_DEFAULT (sizeof(void *) / 4 * 1024 * 1024) /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK_INITIAL (THR_STACK_DEFAULT * 2) /* * Define priorities returned by kernel. */ #define THR_MIN_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_min) #define THR_MAX_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_max) #define THR_DEF_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_default) #define THR_MIN_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_min) #define THR_MAX_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_max) #define THR_DEF_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_default) /* XXX The SCHED_FIFO should have same priority range as SCHED_RR */ #define THR_MIN_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO_1].pri_min) #define THR_MAX_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_max) #define THR_DEF_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_default) struct pthread_prio { int pri_min; int pri_max; int pri_default; }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { struct urwlock lock; uint32_t owner; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_DEAD }; struct pthread_specific_elem { const void *data; int seqno; }; struct pthread_key { volatile int allocated; int seqno; void (*destructor)(void *); }; /* * lwpid_t is 32bit but kernel thr API exports tid as long type * to preserve the ABI for M:N model in very early date (r131431). */ #define TID(thread) ((uint32_t) ((thread)->tid)) /* * Thread structure. */ struct pthread { #define _pthread_startzero tid /* Kernel thread id. */ long tid; #define TID_TERMINATED 1 /* * Lock for accesses to this thread structure. */ struct umutex lock; /* Internal condition variable cycle number. */ uint32_t cycle; /* How many low level locks the thread held. */ int locklevel; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* Signal blocked counter. */ int sigblock; /* Queue entry for list of all threads. 
*/ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ /* Queue entry for GC lists. */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry. */ LIST_ENTRY(pthread) hle; /* Sleep queue entry */ TAILQ_ENTRY(pthread) wle; /* Threads reference count. */ int refcount; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; #define SHOULD_CANCEL(thr) \ ((thr)->cancel_pending && (thr)->cancel_enable && \ (thr)->no_cancel == 0) /* Cancellation is enabled */ int cancel_enable; /* Cancellation request is pending */ int cancel_pending; /* Thread is at cancellation point */ int cancel_point; /* Cancellation is temporarily disabled */ int no_cancel; /* Asynchronouse cancellation is enabled */ int cancel_async; /* Cancellation is in progress */ int cancelling; /* Thread temporary signal mask. */ sigset_t sigmask; /* Thread should unblock SIGCANCEL. */ int unblock_sigcancel; /* In sigsuspend state */ int in_sigsuspend; /* deferred signal info */ siginfo_t deferred_siginfo; /* signal mask to restore. */ sigset_t deferred_sigmask; /* the sigaction should be used for deferred signal. */ struct sigaction deferred_sigact; /* deferred signal delivery is performed, do not reenter. */ int deferred_run; /* Force new thread to exit. */ int force_exit; /* Thread state: */ enum pthread_state state; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */ #define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */ #define THR_FLAGS_DETACHED 0x0008 /* thread is detached */ /* Thread list flags; only set with thread list lock held. */ int tlflags; #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ /* * Queues of the owned mutexes. Private queue must have index * + 1 of the corresponding full queue. */ #define TMQ_NORM 0 /* NORMAL or PRIO_INHERIT normal */ #define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */ #define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */ #define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */ #define TMQ_NITEMS 4 struct mutex_queue mq[TMQ_NITEMS]; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Number rwlocks rdlocks held. */ int rdlock_count; /* * Current locks bitmap for rtld. */ int rtld_bits; /* Thread control block */ struct tcb *tcb; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; #ifdef _PTHREAD_FORCED_UNWIND struct _Unwind_Exception ex; void *unwind_stackend; int unwind_disabled; #endif /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; /* Enable event reporting */ int report_events; /* Event mask */ int event_mask; /* Event */ td_event_msg_t event_buf; /* Wait channel */ void *wchan; /* Referenced mutex. */ struct pthread_mutex *mutex_obj; /* Thread will sleep. */ int will_sleep; /* Number of threads deferred. */ int nwaiter_defer; /* Deferred threads from pthread_cond_signal. 
*/ unsigned int *defer_waiters[MAX_DEFER_WAITERS]; #define _pthread_endzero wake_addr struct wake_addr *wake_addr; #define WAKE_ADDR(td) ((td)->wake_addr) /* Sleep queue */ struct sleepqueue *sleepqueue; }; #define THR_SHOULD_GC(thrd) \ ((thrd)->refcount == 0 && (thrd)->state == PS_DEAD && \ ((thrd)->flags & THR_FLAGS_DETACHED) != 0) #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_CRITICAL_ENTER(thrd) \ (thrd)->critical_count++ #define THR_CRITICAL_LEAVE(thrd) \ do { \ (thrd)->critical_count--; \ _thr_ast(thrd); \ } while (0) #define THR_UMUTEX_TRYLOCK(thrd, lck) \ _thr_umutex_trylock((lck), TID(thrd)) #define THR_UMUTEX_LOCK(thrd, lck) \ _thr_umutex_lock((lck), TID(thrd)) #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) #define THR_UMUTEX_UNLOCK(thrd, lck) \ _thr_umutex_unlock((lck), TID(thrd)) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock(lck, TID(thrd)); \ } while (0) #define THR_LOCK_ACQUIRE_SPIN(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock_spin(lck, TID(thrd)); \ } while (0) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT_LOCKLEVEL(thrd) \ do { \ if (__predict_false((thrd)->locklevel <= 0)) \ _thr_assert_lock_level(); \ } while (0) #else #define THR_ASSERT_LOCKLEVEL(thrd) #endif #define THR_LOCK_RELEASE(thrd, lck) \ do { \ THR_ASSERT_LOCKLEVEL(thrd); \ _thr_umutex_unlock((lck), TID(thrd)); \ (thrd)->locklevel--; \ _thr_ast(thrd); \ } while (0) #define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock) #define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) #define THREAD_LIST_RDLOCK(curthrd) \ do { \ (curthrd)->locklevel++; \ _thr_rwl_rdlock(&_thr_list_lock); \ } while (0) #define THREAD_LIST_WRLOCK(curthrd) \ do { \ (curthrd)->locklevel++; \ _thr_rwl_wrlock(&_thr_list_lock); \ } while (0) #define THREAD_LIST_UNLOCK(curthrd) \ do { \ _thr_rwl_unlock(&_thr_list_lock); \ (curthrd)->locklevel--; \ _thr_ast(curthrd); \ } while (0) /* * Macros to insert/remove threads to the all thread list and * the gc list. 
*/ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define THR_REF_ADD(curthread, pthread) { \ THR_CRITICAL_ENTER(curthread); \ pthread->refcount++; \ } while (0) #define THR_REF_DEL(curthread, pthread) { \ pthread->refcount--; \ THR_CRITICAL_LEAVE(curthread); \ } while (0) #define GC_NEEDED() (_gc_count >= 5) #define SHOULD_REPORT_EVENT(curthr, e) \ (curthr->report_events && \ (((curthr)->event_mask | _thread_event_mask ) & e) != 0) extern int __isthreaded; /* * Global variables for the pthread kernel. */ extern char *_usrstack __hidden; extern struct pthread *_thr_initial __hidden; /* For debugger */ extern int _libthr_debug; extern int _thread_event_mask; extern struct pthread *_thread_last_event; /* List of all threads: */ extern pthreadlist _thread_list; /* List of threads needing GC: */ extern pthreadlist _thread_gc_list __hidden; extern int _thread_active_threads; extern atfork_head _thr_atfork_list __hidden; extern struct urwlock _thr_atfork_lock __hidden; /* Default thread attributes: */ extern struct pthread_attr _pthread_attr_default __hidden; /* Default mutex attributes: */ extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden; extern struct pthread_mutex_attr _pthread_mutexattr_adaptive_default __hidden; /* Default condition variable attributes: */ extern struct pthread_cond_attr _pthread_condattr_default __hidden; extern struct pthread_prio _thr_priorities[] __hidden; extern pid_t _thr_pid __hidden; extern int _thr_is_smp __hidden; extern size_t _thr_guard_default __hidden; extern size_t _thr_stack_default __hidden; extern size_t _thr_stack_initial __hidden; extern int _thr_page_size __hidden; extern int _thr_spinloops __hidden; extern int _thr_yieldloops __hidden; extern int _thr_queuefifo __hidden; /* Garbage thread count. */ extern int _gc_count __hidden; extern struct umutex _mutex_static_lock __hidden; extern struct umutex _cond_static_lock __hidden; extern struct umutex _rwlock_static_lock __hidden; extern struct umutex _keytable_lock __hidden; extern struct urwlock _thr_list_lock __hidden; extern struct umutex _thr_event_lock __hidden; extern struct umutex _suspend_all_lock __hidden; extern int _suspend_all_waiters __hidden; extern int _suspend_all_cycle __hidden; extern struct pthread *_single_thread __hidden; /* * Function prototype definitions. 
*/ __BEGIN_DECLS int _thr_setthreaded(int) __hidden; int _mutex_cv_lock(struct pthread_mutex *, int) __hidden; int _mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden; int _mutex_cv_attach(struct pthread_mutex *, int) __hidden; int _mutex_cv_detach(struct pthread_mutex *, int *) __hidden; int _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; void _libpthread_init(struct pthread *) __hidden; struct pthread *_thr_alloc(struct pthread *) __hidden; void _thread_exit(const char *, int, const char *) __hidden __dead2; int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden; void _thr_ref_delete(struct pthread *, struct pthread *) __hidden; void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden; int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden; void _thr_rtld_init(void) __hidden; void _thr_rtld_postfork_child(void) __hidden; int _thr_stack_alloc(struct pthread_attr *) __hidden; void _thr_stack_free(struct pthread_attr *) __hidden; void _thr_free(struct pthread *, struct pthread *) __hidden; void _thr_gc(struct pthread *) __hidden; void _thread_cleanupspecific(void) __hidden; void _thread_printf(int, const char *, ...) __hidden; void _thr_spinlock_init(void) __hidden; void _thr_cancel_enter(struct pthread *) __hidden; void _thr_cancel_enter2(struct pthread *, int) __hidden; void _thr_cancel_leave(struct pthread *, int) __hidden; void _thr_testcancel(struct pthread *) __hidden; void _thr_signal_block(struct pthread *) __hidden; void _thr_signal_unblock(struct pthread *) __hidden; void _thr_signal_init(int) __hidden; void _thr_signal_deinit(void) __hidden; int _thr_send_sig(struct pthread *, int sig) __hidden; void _thr_list_init(void) __hidden; void _thr_hash_add(struct pthread *) __hidden; void _thr_hash_remove(struct pthread *) __hidden; struct pthread *_thr_hash_find(struct pthread *) __hidden; void _thr_link(struct pthread *, struct pthread *) __hidden; void _thr_unlink(struct pthread *, struct pthread *) __hidden; void _thr_assert_lock_level(void) __hidden __dead2; void _thr_ast(struct pthread *) __hidden; void _thr_once_init(void) __hidden; void _thr_report_creation(struct pthread *curthread, struct pthread *newthread) __hidden; void _thr_report_death(struct pthread *curthread) __hidden; int _thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden; int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden; void _thr_signal_prefork(void) __hidden; void _thr_signal_postfork(void) __hidden; void _thr_signal_postfork_child(void) __hidden; void _thr_suspend_all_lock(struct pthread *) __hidden; void _thr_suspend_all_unlock(struct pthread *) __hidden; void _thr_try_gc(struct pthread *, struct pthread *) __hidden; int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, struct rtprio *rtp) __hidden; void _thread_bp_create(void); void _thread_bp_death(void); int _sched_yield(void); void _pthread_cleanup_push(void (*)(void *), void *); void _pthread_cleanup_pop(int); void _pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden; void _pthread_cancel_enter(int maycancel); void _pthread_cancel_leave(int maycancel); /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_openat(int, const char *, int, ...); #endif /* #include */ #ifdef _SIGNAL_H_ int 
__sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(const ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigwait(const sigset_t *, int *); int __sys_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *); int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _SYS_UCONTEXT_H_ int __sys_setcontext(const ucontext_t *ucp); int __sys_swapcontext(ucontext_t *oucp, const ucontext_t *ucp); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_fork(void); pid_t __sys_getpid(void); ssize_t __sys_read(int, void *, size_t); void __sys_exit(int); #endif static inline int _thr_isthreaded(void) { return (__isthreaded != 0); } static inline int _thr_is_inited(void) { return (_thr_initial != NULL); } static inline void _thr_check_init(void) { if (_thr_initial == NULL) _libpthread_init(NULL); } struct wake_addr *_thr_alloc_wake_addr(void); void _thr_release_wake_addr(struct wake_addr *); int _thr_sleep(struct pthread *, int, const struct timespec *); void _thr_wake_addr_init(void) __hidden; static inline void _thr_clear_wake(struct pthread *td) { td->wake_addr->value = 0; } static inline int _thr_is_woken(struct pthread *td) { return td->wake_addr->value != 0; } static inline void _thr_set_wake(unsigned int *waddr) { *waddr = 1; _thr_umtx_wake(waddr, INT_MAX, 0); } void _thr_wake_all(unsigned int *waddrs[], int) __hidden; static inline struct pthread * _sleepq_first(struct sleepqueue *sq) { return TAILQ_FIRST(&sq->sq_blocked); } void _sleepq_init(void) __hidden; struct sleepqueue *_sleepq_alloc(void) __hidden; void _sleepq_free(struct sleepqueue *) __hidden; void _sleepq_lock(void *) __hidden; void _sleepq_unlock(void *) __hidden; struct sleepqueue *_sleepq_lookup(void *) __hidden; void _sleepq_add(void *, struct pthread *) __hidden; int _sleepq_remove(struct sleepqueue *, struct pthread *) __hidden; void _sleepq_drop(struct sleepqueue *, void (*cb)(struct pthread *, void *arg), void *) __hidden; int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); struct dl_phdr_info; void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info); void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden; void _thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden; void _thr_stack_fix_protection(struct pthread *thrd); int *__error_threaded(void) __hidden; void __thr_interpose_libc(void) __hidden; pid_t __thr_fork(void); int __thr_setcontext(const ucontext_t *ucp); int __thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact) __hidden; int __thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset); int __thr_sigsuspend(const sigset_t * set); int __thr_sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout); int __thr_sigwait(const sigset_t *set, int *sig); int __thr_sigwaitinfo(const sigset_t *set, siginfo_t *info); int __thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp); void __thr_map_stacks_exec(void); struct _spinlock; void __thr_spinunlock(struct _spinlock *lck); void __thr_spinlock(struct _spinlock *lck); struct tcb *_tcb_ctor(struct pthread *, int); void _tcb_dtor(struct tcb *); void 
__thr_pshared_init(void) __hidden;
void	*__thr_pshared_offpage(void *key, int doalloc) __hidden;
void	__thr_pshared_destroy(void *key) __hidden;
void	__thr_pshared_atfork_pre(void) __hidden;
void	__thr_pshared_atfork_post(void) __hidden;

__END_DECLS

#endif  /* !_THR_PRIVATE_H */
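
The shared_mutex_init() hunk in thr_mutex.c above implements a small three-stage
initialization state machine for the offpage that backs a process-shared mutex:
the new m_ps field moves from PMUTEX_INITSTAGE_ALLOC (which must be 0, because
the kernel hands out a zero-filled offpage) through PMUTEX_INITSTAGE_BUSY to
PMUTEX_INITSTAGE_DONE, so that racing pthread_mutex_init() calls, and lock or
unlock paths that reach an offpage first initialized in another process, all
converge on exactly one call to mutex_init_body(). The sketch below shows the
same pattern in isolation; it is a minimal approximation that uses C11
<stdatomic.h> and sched_yield() instead of libthr's machine/atomic.h primitives
and _pthread_yield(), and the names shared_obj, lazy_shared_init and
do_init_once are hypothetical, not part of the diff.

	#include <stdatomic.h>
	#include <sched.h>

	/* Hypothetical stand-ins for the PMUTEX_INITSTAGE_* values; ALLOC
	 * must be zero so a freshly zero-filled page reads as "not yet
	 * initialized". */
	enum { INITSTAGE_ALLOC = 0, INITSTAGE_BUSY = 1, INITSTAGE_DONE = 2 };

	struct shared_obj {
		_Atomic int init_stage;	/* plays the role of m_ps */
		int payload;		/* stands in for the mutex body */
	};

	/* Stand-in for mutex_init_body(); must run exactly once per page. */
	static void
	do_init_once(struct shared_obj *obj)
	{
		obj->payload = 42;
	}

	static void
	lazy_shared_init(struct shared_obj *obj)
	{
		int stage;

		for (;;) {
			stage = atomic_load_explicit(&obj->init_stage,
			    memory_order_acquire);
			switch (stage) {
			case INITSTAGE_DONE:
				/* Initialization finished elsewhere; the
				 * acquire load pairs with the release store
				 * performed by the initializer below. */
				return;
			case INITSTAGE_ALLOC:
				/* Try to claim the right to initialize. */
				if (atomic_compare_exchange_strong(
				    &obj->init_stage, &stage,
				    INITSTAGE_BUSY)) {
					do_init_once(obj);
					atomic_store_explicit(&obj->init_stage,
					    INITSTAGE_DONE,
					    memory_order_release);
					return;
				}
				break;	/* lost the race, re-check */
			case INITSTAGE_BUSY:
				/* Another thread or process is busy
				 * initializing; yield and poll again. */
				sched_yield();
				break;
			}
		}
	}

In the diff itself, check_and_init_mutex(), _pthread_mutex_unlock(), the
prioceiling routines and _pthread_mutex_isowned_np() now call
shared_mutex_init(..., NULL) before dereferencing the offpage, which is why
shared_mutex_init() falls back to a default attribute set (the static
foobar_mutex_attr) when the caller did not come through pthread_mutex_init()
with an explicit attribute.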