diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -81,7 +81,6 @@
 static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
 		const struct timespec *);
 static void	mutex_init_robust(struct pthread *curthread);
-static int	mutex_qidx(struct pthread_mutex *m);
 static bool	is_robust_mutex(struct pthread_mutex *m);
 static bool	is_pshared_mutex(struct pthread_mutex *m);
 
@@ -242,6 +241,7 @@
 {
 
 	pmutex->m_flags = attr->m_type;
+	pmutex->m_qidx = TMQ_NORM;
 	pmutex->m_count = 0;
 	pmutex->m_spinloops = 0;
 	pmutex->m_yieldloops = 0;
@@ -272,6 +272,8 @@
 		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
 		pmutex->m_yieldloops = _thr_yieldloops;
 	}
+	if ((pmutex->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
+		pmutex->m_qidx = (is_robust_mutex(pmutex) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
 }
 
 static int
@@ -324,7 +326,7 @@
 {
 	struct pthread_mutex *m2;
 
-	m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
+	m2 = TAILQ_LAST(&curthread->mq[m->m_qidx], mutex_queue);
 	if (m2 != NULL)
 		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
 	else
@@ -500,15 +502,6 @@
 	return (ret);
 }
 
-static int
-mutex_qidx(struct pthread_mutex *m)
-{
-
-	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
-		return (TMQ_NORM);
-	return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
-}
-
 /*
  * Both enqueue_mutex() and dequeue_mutex() operate on the
  * thread-private linkage of the locked mutexes and on the robust
@@ -527,15 +520,13 @@
 {
 	struct pthread_mutex *m1;
 	uintptr_t *rl;
-	int qidx;
 
 	/* Add to the list of owned mutexes: */
 	if (error != EOWNERDEAD)
 		mutex_assert_not_owned(curthread, m);
-	qidx = mutex_qidx(m);
-	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
+	TAILQ_INSERT_TAIL(&curthread->mq[m->m_qidx], m, m_qe);
 	if (!is_pshared_mutex(m))
-		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
+		TAILQ_INSERT_TAIL(&curthread->mq[m->m_qidx + 1], m, m_pqe);
 	if (is_robust_mutex(m)) {
 		rl = is_pshared_mutex(m) ?
 		    &curthread->robust_list : &curthread->priv_robust_list;
@@ -557,10 +548,8 @@
 dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
 {
 	struct pthread_mutex *mp, *mn;
-	int qidx;
 
 	mutex_assert_is_owned(m);
-	qidx = mutex_qidx(m);
 	if (is_robust_mutex(m)) {
 		mp = m->m_rb_prev;
 		if (mp == NULL) {
@@ -581,9 +570,9 @@
 		m->m_lock.m_rb_lnk = 0;
 		m->m_rb_prev = NULL;
 	}
-	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
+	TAILQ_REMOVE(&curthread->mq[m->m_qidx], m, m_qe);
 	if (!is_pshared_mutex(m))
-		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
+		TAILQ_REMOVE(&curthread->mq[m->m_qidx + 1], m, m_pqe);
 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
 		set_inherited_priority(curthread, m);
 	mutex_init_link(m);
@@ -596,7 +585,7 @@
 
 	*m = *mutex;
 	ret = 0;
-	if (*m == THR_PSHARED_PTR) {
+	if (__predict_false(*m == THR_PSHARED_PTR)) {
 		*m = __thr_pshared_offpage(mutex, 0);
 		if (*m == NULL)
 			ret = EINVAL;
@@ -714,7 +703,7 @@
 	return (ret);
 }
 
-static inline int
+static __always_inline int
 mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
     bool cvattach, bool rb_onlist)
 {
@@ -728,7 +717,7 @@
 	if (!rb_onlist)
 		robust = _mutex_enter_robust(curthread, m);
 	ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
-	if (ret == 0 || ret == EOWNERDEAD) {
+	if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
 		enqueue_mutex(curthread, m, ret);
 		if (ret == EOWNERDEAD)
 			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
@@ -737,7 +726,7 @@
 	}
 	if (!rb_onlist && robust)
 		_mutex_leave_robust(curthread, m);
-	if (ret != 0 && ret != EOWNERDEAD &&
+	if (__predict_false(ret != 0) && ret != EOWNERDEAD &&
 	    (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
 		THR_CRITICAL_LEAVE(curthread);
 	return (ret);
@@ -951,7 +940,7 @@
 	return (ret);
 }
 
-static int
+static __always_inline int
 mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)
 {
 	struct pthread *curthread;
@@ -979,11 +968,9 @@
 	    PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
 		m->m_count--;
 	} else {
-		if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
-			deferred = 1;
+		deferred = ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0);
+		if (deferred)
 			m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
-		} else
-			deferred = 0;
 
 		robust = _mutex_enter_robust(curthread, m);
 		dequeue_mutex(curthread, m);
@@ -1033,7 +1020,7 @@
 	struct pthread *curthread;
 	struct pthread_mutex *m, *m1, *m2;
 	struct mutex_queue *q, *qp;
-	int qidx, ret;
+	int ret;
 
 	if (*mutex == THR_PSHARED_PTR) {
 		m = __thr_pshared_offpage(mutex, 0);
@@ -1059,9 +1046,8 @@
 	m2 = TAILQ_NEXT(m, m_qe);
 	if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
 	    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
-		qidx = mutex_qidx(m);
-		q = &curthread->mq[qidx];
-		qp = &curthread->mq[qidx + 1];
+		q = &curthread->mq[m->m_qidx];
+		qp = &curthread->mq[m->m_qidx + 1];
 		TAILQ_REMOVE(q, m, m_qe);
 		if (!is_pshared_mutex(m))
 			TAILQ_REMOVE(qp, m, m_pqe);
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -169,6 +169,7 @@
 	 */
 	struct umutex			m_lock;
 	int				m_flags;
+	int				m_qidx;
 	int				m_count;
 	int				m_spinloops;
 	int				m_yieldloops;