diff --git a/lib/libthr/arch/aarch64/include/pthread_md.h b/lib/libthr/arch/aarch64/include/pthread_md.h index f3ce2d0bdbce..2ae5a0d2e808 100644 --- a/lib/libthr/arch/aarch64/include/pthread_md.h +++ b/lib/libthr/arch/aarch64/include/pthread_md.h @@ -1,51 +1,54 @@ /*- * Copyright (c) 2005 David Xu . * Copyright (c) 2014 the FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #define CPU_SPINWAIT +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE_4K + static __inline struct pthread * _get_curthread(void) { return (_tcb_get()->tcb_thread); } #endif /* _PTHREAD_MD_H_ */ diff --git a/lib/libthr/arch/amd64/include/pthread_md.h b/lib/libthr/arch/amd64/include/pthread_md.h index d7bf5d5e5753..acfbd551b3de 100644 --- a/lib/libthr/arch/amd64/include/pthread_md.h +++ b/lib/libthr/arch/amd64/include/pthread_md.h @@ -1,56 +1,59 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2003 David Xu * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
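THR_PAGE_SIZE_MIN is the smallest page size a port can be built to run with: aarch64 pins it to PAGE_SIZE_4K because arm64 kernels may use larger pages, while the fixed-page-size ports simply reuse PAGE_SIZE. A minimal sketch of the intended consumer, with a hypothetical struct name (the real consumers are the _Static_asserts updated in thr_barrier.c, thr_cond.c, and thr_mutex.c below):

/*
 * Illustrative only: the assertion must hold for the smallest page the
 * port supports, not for whatever page size this particular build
 * targets, because an off-page object occupies a single page at run time.
 */
struct example_offpage_obj {
	char payload[128];		/* stand-in for the real fields */
};

_Static_assert(sizeof(struct example_offpage_obj) <= THR_PAGE_SIZE_MIN,
    "example_offpage_obj is too large for off-page");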
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #include #define CPU_SPINWAIT __asm __volatile("pause") +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE + static __inline struct pthread * _get_curthread(void) { struct pthread *thr; __asm __volatile("movq %%fs:%1, %0" : "=r" (thr) : "m" (*(volatile u_long *)offsetof(struct tcb, tcb_thread))); return (thr); } #define HAS__UMTX_OP_ERR 1 #endif diff --git a/lib/libthr/arch/arm/include/pthread_md.h b/lib/libthr/arch/arm/include/pthread_md.h index 75570337c627..dfd7e7d951bf 100644 --- a/lib/libthr/arch/arm/include/pthread_md.h +++ b/lib/libthr/arch/arm/include/pthread_md.h @@ -1,50 +1,53 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 David Xu . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #define CPU_SPINWAIT +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE + static __inline struct pthread * _get_curthread(void) { if (_thr_initial) return (_tcb_get()->tcb_thread); return (NULL); } #endif /* _PTHREAD_MD_H_ */ diff --git a/lib/libthr/arch/i386/include/pthread_md.h b/lib/libthr/arch/i386/include/pthread_md.h index 525a9c4368a3..8940acee6fee 100644 --- a/lib/libthr/arch/i386/include/pthread_md.h +++ b/lib/libthr/arch/i386/include/pthread_md.h @@ -1,56 +1,59 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Daniel Eischen . * Copyright (c) 2005 David Xu . * All rights reserved. 
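For comparison with the amd64 %fs-relative load above, the generic fetch used by the non-x86 ports (as in the aarch64 hunk earlier in this patch) is a plain TCB dereference; a sketch with a hypothetical function name:

static __inline struct pthread *
example_get_curthread(void)
{
	/* Same result as the %fs load: the TCB's thread back pointer. */
	return (_tcb_get()->tcb_thread);
}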
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #include #define CPU_SPINWAIT __asm __volatile("pause") +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE + static __inline struct pthread * _get_curthread(void) { struct pthread *thr; __asm __volatile("movl %%gs:%1, %0" : "=r" (thr) : "m" (*(volatile u_int *)offsetof(struct tcb, tcb_thread))); return (thr); } #define HAS__UMTX_OP_ERR 1 #endif diff --git a/lib/libthr/arch/powerpc/include/pthread_md.h b/lib/libthr/arch/powerpc/include/pthread_md.h index 10c787ae189a..076c43e1ef43 100644 --- a/lib/libthr/arch/powerpc/include/pthread_md.h +++ b/lib/libthr/arch/powerpc/include/pthread_md.h @@ -1,53 +1,56 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2004 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #define CPU_SPINWAIT +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE + static __inline struct pthread * _get_curthread(void) { if (_thr_initial) return (_tcb_get()->tcb_thread); return (NULL); } #define HAS__UMTX_OP_ERR 1 #endif /* _PTHREAD_MD_H_ */ diff --git a/lib/libthr/arch/riscv/include/pthread_md.h b/lib/libthr/arch/riscv/include/pthread_md.h index 092d40266e1c..7c7b9a787c24 100644 --- a/lib/libthr/arch/riscv/include/pthread_md.h +++ b/lib/libthr/arch/riscv/include/pthread_md.h @@ -1,58 +1,61 @@ /*- * Copyright (c) 2005 David Xu * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ #include #include #define CPU_SPINWAIT +/* For use in _Static_assert to check structs will fit in a page */ +#define THR_PAGE_SIZE_MIN PAGE_SIZE + static __inline struct pthread * _get_curthread(void) { if (_thr_initial) return (_tcb_get()->tcb_thread); return (NULL); } #endif /* _PTHREAD_MD_H_ */ diff --git a/lib/libthr/thread/thr_barrier.c b/lib/libthr/thread/thr_barrier.c index fa205ad509c9..fc42588cb412 100644 --- a/lib/libthr/thread/thr_barrier.c +++ b/lib/libthr/thread/thr_barrier.c @@ -1,174 +1,174 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
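The asserts being switched to THR_PAGE_SIZE_MIN below protect the process-shared case, where the user-visible handle holds THR_PSHARED_PTR and the real object lives in a single page-sized "off-page" resolved via __thr_pshared_offpage(). A runnable sketch of that case from the application side (function name hypothetical):

#include <sys/mman.h>
#include <pthread.h>

static pthread_barrier_t *
example_pshared_barrier(void)
{
	pthread_barrier_t *b;
	pthread_barrierattr_t ba;

	/* Handle in shared memory; libthr backs it with an off-page. */
	b = mmap(NULL, sizeof(*b), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	if (b == MAP_FAILED)
		return (NULL);
	pthread_barrierattr_init(&ba);
	pthread_barrierattr_setpshared(&ba, PTHREAD_PROCESS_SHARED);
	if (pthread_barrier_init(b, &ba, 2) != 0)
		return (NULL);
	return (b);
}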
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "namespace.h" #include #include #include #include "un-namespace.h" #include "thr_private.h" -_Static_assert(sizeof(struct pthread_barrier) <= PAGE_SIZE, +_Static_assert(sizeof(struct pthread_barrier) <= THR_PAGE_SIZE_MIN, "pthread_barrier is too large for off-page"); __weak_reference(_pthread_barrier_init, pthread_barrier_init); __weak_reference(_pthread_barrier_wait, pthread_barrier_wait); __weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy); int _pthread_barrier_destroy(pthread_barrier_t *barrier) { pthread_barrier_t bar; struct pthread *curthread; int pshared; if (barrier == NULL || *barrier == NULL) return (EINVAL); if (*barrier == THR_PSHARED_PTR) { bar = __thr_pshared_offpage(barrier, 0); if (bar == NULL) { *barrier = NULL; return (0); } pshared = 1; } else { bar = *barrier; pshared = 0; } curthread = _get_curthread(); THR_UMUTEX_LOCK(curthread, &bar->b_lock); if (bar->b_destroying) { THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); return (EBUSY); } bar->b_destroying = 1; do { if (bar->b_waiters > 0) { bar->b_destroying = 0; THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); return (EBUSY); } if (bar->b_refcount != 0) { _thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0); THR_UMUTEX_LOCK(curthread, &bar->b_lock); } else break; } while (1); bar->b_destroying = 0; THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); *barrier = NULL; if (pshared) __thr_pshared_destroy(barrier); else free(bar); return (0); } int _pthread_barrier_init(pthread_barrier_t * __restrict barrier, const pthread_barrierattr_t * __restrict attr, unsigned count) { pthread_barrier_t bar; int pshared; if (barrier == NULL || count == 0 || count > INT_MAX) return (EINVAL); if (attr == NULL || *attr == NULL || (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) { bar = calloc(1, sizeof(struct pthread_barrier)); if (bar == NULL) return (ENOMEM); *barrier = bar; pshared = 0; } else { bar = __thr_pshared_offpage(barrier, 1); if (bar == NULL) return (EFAULT); *barrier = THR_PSHARED_PTR; pshared = 1; } _thr_umutex_init(&bar->b_lock); _thr_ucond_init(&bar->b_cv); if (pshared) { bar->b_lock.m_flags |= USYNC_PROCESS_SHARED; bar->b_cv.c_flags |= USYNC_PROCESS_SHARED; } bar->b_count = count; return (0); } int _pthread_barrier_wait(pthread_barrier_t *barrier) { struct pthread *curthread; pthread_barrier_t bar; int64_t cycle; int ret; if (barrier == NULL || *barrier == NULL) return (EINVAL); if (*barrier == THR_PSHARED_PTR) { bar = __thr_pshared_offpage(barrier, 0); if (bar == NULL) return (EINVAL); } else { bar = *barrier; } curthread = 
_get_curthread(); THR_UMUTEX_LOCK(curthread, &bar->b_lock); if (++bar->b_waiters == bar->b_count) { /* Current thread is the last thread */ bar->b_waiters = 0; bar->b_cycle++; _thr_ucond_broadcast(&bar->b_cv); THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); ret = PTHREAD_BARRIER_SERIAL_THREAD; } else { cycle = bar->b_cycle; bar->b_refcount++; do { _thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0); THR_UMUTEX_LOCK(curthread, &bar->b_lock); /* test cycle to avoid bogus wakeup */ } while (cycle == bar->b_cycle); if (--bar->b_refcount == 0 && bar->b_destroying) _thr_ucond_broadcast(&bar->b_cv); THR_UMUTEX_UNLOCK(curthread, &bar->b_lock); ret = 0; } return (ret); } diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c index 9eba3dd437f0..925792a92662 100644 --- a/lib/libthr/thread/thr_cond.c +++ b/lib/libthr/thread/thr_cond.c @@ -1,559 +1,559 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 David Xu * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "namespace.h" #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" -_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE, +_Static_assert(sizeof(struct pthread_cond) <= THR_PAGE_SIZE_MIN, "pthread_cond too large"); /* * Prototypes */ int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec * abstime); static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr); static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel); static int cond_signal_common(pthread_cond_t *cond); static int cond_broadcast_common(pthread_cond_t *cond); /* * Double underscore versions are cancellation points. Single underscore * versions are not and are provided for libc internal usage (which * shouldn't introduce cancellation points).
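The barrier wait loop above re-tests b_cycle rather than trusting the wakeup itself, so a waiter released by an earlier generation cannot slip through the current one. The same pattern expressed with the public API (an illustrative sketch, not the libthr implementation):

#include <pthread.h>
#include <stdint.h>

struct gen_barrier {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	unsigned	count;		/* threads per cycle */
	unsigned	waiters;
	uint64_t	cycle;		/* generation number */
};

static int
gen_barrier_wait(struct gen_barrier *b)
{
	uint64_t cycle;

	pthread_mutex_lock(&b->lock);
	if (++b->waiters == b->count) {
		b->waiters = 0;
		b->cycle++;			/* open the gate */
		pthread_cond_broadcast(&b->cv);
		pthread_mutex_unlock(&b->lock);
		return (1);			/* the "serial" thread */
	}
	cycle = b->cycle;
	while (cycle == b->cycle)		/* re-check the generation */
		pthread_cond_wait(&b->cv, &b->lock);
	pthread_mutex_unlock(&b->lock);
	return (0);
}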
*/ __weak_reference(__thr_cond_wait, pthread_cond_wait); __weak_reference(__thr_cond_wait, __pthread_cond_wait); __weak_reference(_thr_cond_wait, _pthread_cond_wait); __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_thr_cond_init, pthread_cond_init); __weak_reference(_thr_cond_init, _pthread_cond_init); __weak_reference(_thr_cond_destroy, pthread_cond_destroy); __weak_reference(_thr_cond_destroy, _pthread_cond_destroy); __weak_reference(_thr_cond_signal, pthread_cond_signal); __weak_reference(_thr_cond_signal, _pthread_cond_signal); __weak_reference(_thr_cond_broadcast, pthread_cond_broadcast); __weak_reference(_thr_cond_broadcast, _pthread_cond_broadcast); #define CV_PSHARED(cvp) (((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0) static void cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr) { if (cattr == NULL) { cvp->kcond.c_clockid = CLOCK_REALTIME; } else { if (cattr->c_pshared) cvp->kcond.c_flags |= USYNC_PROCESS_SHARED; cvp->kcond.c_clockid = cattr->c_clockid; } } static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { struct pthread_cond *cvp; const struct pthread_cond_attr *cattr; int pshared; cattr = cond_attr != NULL ? *cond_attr : NULL; if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) { pshared = 0; cvp = calloc(1, sizeof(struct pthread_cond)); if (cvp == NULL) return (ENOMEM); } else { pshared = 1; cvp = __thr_pshared_offpage(cond, 1); if (cvp == NULL) return (EFAULT); } /* * Initialise the condition variable structure: */ cond_init_body(cvp, cattr); *cond = pshared ? THR_PSHARED_PTR : cvp; return (0); } static int init_static(struct pthread *thread, pthread_cond_t *cond) { int ret; THR_LOCK_ACQUIRE(thread, &_cond_static_lock); if (*cond == NULL) ret = cond_init(cond, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_cond_static_lock); return (ret); } #define CHECK_AND_INIT_COND \ if (*cond == THR_PSHARED_PTR) { \ cvp = __thr_pshared_offpage(cond, 0); \ if (cvp == NULL) \ return (EINVAL); \ } else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \ if (cvp == THR_COND_INITIALIZER) { \ int ret; \ ret = init_static(_get_curthread(), cond); \ if (ret) \ return (ret); \ } else if (cvp == THR_COND_DESTROYED) { \ return (EINVAL); \ } \ cvp = *cond; \ } int _thr_cond_init(pthread_cond_t * __restrict cond, const pthread_condattr_t * __restrict cond_attr) { *cond = NULL; return (cond_init(cond, cond_attr)); } int _thr_cond_destroy(pthread_cond_t *cond) { struct pthread_cond *cvp; int error; error = 0; if (*cond == THR_PSHARED_PTR) { cvp = __thr_pshared_offpage(cond, 0); if (cvp != NULL) { if (cvp->kcond.c_has_waiters) error = EBUSY; else __thr_pshared_destroy(cond); } if (error == 0) *cond = THR_COND_DESTROYED; } else if ((cvp = *cond) == THR_COND_INITIALIZER) { /* nothing */ } else if (cvp == THR_COND_DESTROYED) { error = EINVAL; } else { cvp = *cond; if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters) error = EBUSY; else { *cond = THR_COND_DESTROYED; free(cvp); } } return (error); } /* * Cancellation behavior: * Thread may be canceled at start, if thread is canceled, it means it * did not get a wakeup from pthread_cond_signal(), otherwise, it is * not canceled. * Thread cancellation never cause wakeup from pthread_cond_signal() * to be lost. 
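The table of __weak_reference()/__strong_reference() lines above wires up the double-underscore (cancellation point) versus single-underscore (libc-internal) split through symbol aliasing from <sys/cdefs.h>. The idiom in isolation, with hypothetical names:

#include <sys/cdefs.h>

int _example_wait(void);

int
_example_wait(void)
{
	return (0);	/* internal version: not a cancellation point */
}

/* Publish the POSIX name as a weak alias of the internal symbol. */
__weak_reference(_example_wait, example_wait);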
*/ static int cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread; int error, error2, recurse, robust; curthread = _get_curthread(); robust = _mutex_enter_robust(curthread, mp); error = _mutex_cv_detach(mp, &recurse); if (error != 0) { if (robust) _mutex_leave_robust(curthread, mp); return (error); } if (cancel) _thr_cancel_enter2(curthread, 0); error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime, CVWAIT_ABSTIME | CVWAIT_CLOCKID); if (cancel) _thr_cancel_leave(curthread, 0); /* * Note that PP mutex and ROBUST mutex may return * interesting error codes. */ if (error == 0) { error2 = _mutex_cv_lock(mp, recurse, true); } else if (error == EINTR || error == ETIMEDOUT) { error2 = _mutex_cv_lock(mp, recurse, true); /* * Do not do cancellation on EOWNERDEAD there. The * cancellation cleanup handler will use the protected * state and unlock the mutex without making the state * consistent and the state will be unrecoverable. */ if (error2 == 0 && cancel) { if (robust) { _mutex_leave_robust(curthread, mp); robust = false; } _thr_testcancel(curthread); } if (error == EINTR) error = 0; } else { /* We know that it didn't unlock the mutex. */ _mutex_cv_attach(mp, recurse); if (cancel) { if (robust) { _mutex_leave_robust(curthread, mp); robust = false; } _thr_testcancel(curthread); } error2 = 0; } if (robust) _mutex_leave_robust(curthread, mp); return (error2 != 0 ? error2 : error); } /* * Thread waits in userland queue whenever possible, when thread * is signaled or broadcasted, it is removed from the queue, and * is saved in curthread's defer_waiters[] buffer, but won't be * woken up until mutex is unlocked. */ static int cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread; struct sleepqueue *sq; int deferred, error, error2, recurse; curthread = _get_curthread(); if (curthread->wchan != NULL) PANIC("thread %p was already on queue.", curthread); if (cancel) _thr_testcancel(curthread); _sleepq_lock(cvp); /* * set __has_user_waiters before unlocking mutex, this allows * us to check it without locking in pthread_cond_signal(). 
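cond_wait_kernel() takes care to re-acquire (or re-attach) the mutex on every exit path, so the caller-visible contract stays the canonical predicate loop; a usage sketch:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool ready;

static void
example_wait_ready(void)
{
	pthread_mutex_lock(&m);
	while (!ready)			/* wakeups may be spurious */
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
}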
*/ cvp->__has_user_waiters = 1; deferred = 0; (void)_mutex_cv_unlock(mp, &recurse, &deferred); curthread->mutex_obj = mp; _sleepq_add(cvp, curthread); for(;;) { _thr_clear_wake(curthread); _sleepq_unlock(cvp); if (deferred) { deferred = 0; if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0) (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags, 0, 0); } if (curthread->nwaiter_defer > 0) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } if (cancel) _thr_cancel_enter2(curthread, 0); error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime); if (cancel) _thr_cancel_leave(curthread, 0); _sleepq_lock(cvp); if (curthread->wchan == NULL) { error = 0; break; } else if (cancel && SHOULD_CANCEL(curthread)) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); _sleepq_unlock(cvp); curthread->mutex_obj = NULL; error2 = _mutex_cv_lock(mp, recurse, false); if (!THR_IN_CRITICAL(curthread)) _pthread_exit(PTHREAD_CANCELED); else /* this should not happen */ return (error2); } else if (error == ETIMEDOUT) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); break; } } _sleepq_unlock(cvp); curthread->mutex_obj = NULL; error2 = _mutex_cv_lock(mp, recurse, false); if (error == 0) error = error2; return (error); } static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); struct pthread_cond *cvp; struct pthread_mutex *mp; int error; CHECK_AND_INIT_COND if (*mutex == THR_PSHARED_PTR) { mp = __thr_pshared_offpage(mutex, 0); if (mp == NULL) return (EINVAL); } else { mp = *mutex; } if ((error = _mutex_owned(curthread, mp)) != 0) return (error); if (curthread->attr.sched_policy != SCHED_OTHER || (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp)) return (cond_wait_kernel(cvp, mp, abstime, cancel)); else return (cond_wait_user(cvp, mp, abstime, cancel)); } int _thr_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { return (cond_wait_common(cond, mutex, NULL, 0)); } int __thr_cond_wait(pthread_cond_t * __restrict cond, pthread_mutex_t * __restrict mutex) { return (cond_wait_common(cond, mutex, NULL, 1)); } int _thr_cond_timedwait(pthread_cond_t * __restrict cond, pthread_mutex_t * __restrict mutex, const struct timespec * __restrict abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 0)); } int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 1)); } static int cond_signal_common(pthread_cond_t *cond) { struct pthread *curthread = _get_curthread(); struct pthread *td; struct pthread_cond *cvp; struct pthread_mutex *mp; struct sleepqueue *sq; int *waddr; int pshared; /* * If the condition variable is statically initialized, perform dynamic * initialization. 
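The timedwait entry points above reject a NULL abstime or a tv_nsec outside [0, 1000000000); the deadline is absolute on the condvar's clock (CLOCK_REALTIME for a default attribute). A sketch of correct caller-side construction (function name hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static int
example_wait_deadline(pthread_cond_t *cv, pthread_mutex_t *m, bool *pred)
{
	struct timespec abst;
	int error;

	clock_gettime(CLOCK_REALTIME, &abst);
	abst.tv_sec += 2;		/* absolute deadline, not a delta */
	error = 0;
	pthread_mutex_lock(m);
	while (!*pred && error == 0)
		error = pthread_cond_timedwait(cv, m, &abst);
	pthread_mutex_unlock(m);
	return (error);			/* 0, or ETIMEDOUT */
}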
*/ CHECK_AND_INIT_COND pshared = CV_PSHARED(cvp); _thr_ucond_signal(&cvp->kcond); if (pshared || cvp->__has_user_waiters == 0) return (0); curthread = _get_curthread(); waddr = NULL; _sleepq_lock(cvp); sq = _sleepq_lookup(cvp); if (sq == NULL) { _sleepq_unlock(cvp); return (0); } td = _sleepq_first(sq); mp = td->mutex_obj; cvp->__has_user_waiters = _sleepq_remove(sq, td); if (PMUTEX_OWNER_ID(mp) == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = &td->wake_addr->value; mp->m_flags |= PMUTEX_FLAG_DEFERRED; } else { waddr = &td->wake_addr->value; } _sleepq_unlock(cvp); if (waddr != NULL) _thr_set_wake(waddr); return (0); } struct broadcast_arg { struct pthread *curthread; unsigned int *waddrs[MAX_DEFER_WAITERS]; int count; }; static void drop_cb(struct pthread *td, void *arg) { struct broadcast_arg *ba = arg; struct pthread_mutex *mp; struct pthread *curthread = ba->curthread; mp = td->mutex_obj; if (PMUTEX_OWNER_ID(mp) == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = &td->wake_addr->value; mp->m_flags |= PMUTEX_FLAG_DEFERRED; } else { if (ba->count >= MAX_DEFER_WAITERS) { _thr_wake_all(ba->waddrs, ba->count); ba->count = 0; } ba->waddrs[ba->count++] = &td->wake_addr->value; } } static int cond_broadcast_common(pthread_cond_t *cond) { int pshared; struct pthread_cond *cvp; struct sleepqueue *sq; struct broadcast_arg ba; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ CHECK_AND_INIT_COND pshared = CV_PSHARED(cvp); _thr_ucond_broadcast(&cvp->kcond); if (pshared || cvp->__has_user_waiters == 0) return (0); ba.curthread = _get_curthread(); ba.count = 0; _sleepq_lock(cvp); sq = _sleepq_lookup(cvp); if (sq == NULL) { _sleepq_unlock(cvp); return (0); } _sleepq_drop(sq, drop_cb, &ba); cvp->__has_user_waiters = 0; _sleepq_unlock(cvp); if (ba.count > 0) _thr_wake_all(ba.waddrs, ba.count); return (0); } int _thr_cond_signal(pthread_cond_t * cond) { return (cond_signal_common(cond)); } int _thr_cond_broadcast(pthread_cond_t * cond) { return (cond_broadcast_common(cond)); } diff --git a/lib/libthr/thread/thr_malloc.c b/lib/libthr/thread/thr_malloc.c index 80a81f9c6c27..b4a627cc55eb 100644 --- a/lib/libthr/thread/thr_malloc.c +++ b/lib/libthr/thread/thr_malloc.c @@ -1,152 +1,151 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
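Both the signal and broadcast paths above park wakeups in defer_waiters[] when the calling thread still owns the waiter's mutex, because waking a thread that will immediately block on that mutex is a wasted round trip. The scenario being optimized, as a sketch:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv2 = PTHREAD_COND_INITIALIZER;
static bool ready2;

static void
example_signal_while_locked(void)
{
	pthread_mutex_lock(&m2);
	ready2 = true;
	/* Waking now would send the waiter straight into the held
	 * mutex; libthr defers the wake (PMUTEX_FLAG_DEFERRED) ... */
	pthread_cond_signal(&cv2);
	/* ... and delivers it as part of this unlock instead. */
	pthread_mutex_unlock(&m2);
}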
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "thr_private.h" int npagesizes; size_t *pagesizes; static size_t pagesizes_d[2]; static struct umutex thr_malloc_umtx; static u_int thr_malloc_umtx_level; void __thr_malloc_init(void) { if (npagesizes != 0) return; npagesizes = getpagesizes(pagesizes_d, nitems(pagesizes_d)); if (npagesizes == -1) { - npagesizes = 1; - pagesizes_d[0] = PAGE_SIZE; + PANIC("Unable to read page sizes"); } pagesizes = pagesizes_d; _thr_umutex_init(&thr_malloc_umtx); } static void thr_malloc_lock(struct pthread *curthread) { uint32_t curtid; if (curthread == NULL) return; curthread->locklevel++; curtid = TID(curthread); if ((uint32_t)thr_malloc_umtx.m_owner == curtid) thr_malloc_umtx_level++; else _thr_umutex_lock(&thr_malloc_umtx, curtid); } static void thr_malloc_unlock(struct pthread *curthread) { if (curthread == NULL) return; if (thr_malloc_umtx_level > 0) thr_malloc_umtx_level--; else _thr_umutex_unlock(&thr_malloc_umtx, TID(curthread)); curthread->locklevel--; _thr_ast(curthread); } void * __thr_calloc(size_t num, size_t size) { struct pthread *curthread; void *res; curthread = _get_curthread(); thr_malloc_lock(curthread); res = __crt_calloc(num, size); thr_malloc_unlock(curthread); return (res); } void __thr_free(void *cp) { struct pthread *curthread; curthread = _get_curthread(); thr_malloc_lock(curthread); __crt_free(cp); thr_malloc_unlock(curthread); } void * __thr_malloc(size_t nbytes) { struct pthread *curthread; void *res; curthread = _get_curthread(); thr_malloc_lock(curthread); res = __crt_malloc(nbytes); thr_malloc_unlock(curthread); return (res); } void * __thr_realloc(void *cp, size_t nbytes) { struct pthread *curthread; void *res; curthread = _get_curthread(); thr_malloc_lock(curthread); res = __crt_realloc(cp, nbytes); thr_malloc_unlock(curthread); return (res); } void __thr_malloc_prefork(struct pthread *curthread) { _thr_umutex_lock(&thr_malloc_umtx, TID(curthread)); } void __thr_malloc_postfork(struct pthread *curthread) { _thr_umutex_unlock(&thr_malloc_umtx, TID(curthread)); } diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c index 303386db7fe7..8dccdf4dfa8c 100644 --- a/lib/libthr/thread/thr_mutex.c +++ b/lib/libthr/thread/thr_mutex.c @@ -1,1200 +1,1200 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1995 John Birrell . * Copyright (c) 2006 David Xu . * Copyright (c) 2015, 2016 The FreeBSD Foundation * * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
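The __thr_malloc_init() change below replaces the old fallback (assume a single PAGE_SIZE page size) with a hard PANIC when getpagesizes(3) fails, since continuing with a guessed page size would silently break later allocator arithmetic. The call's shape, for reference (an illustrative sketch):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

static void
example_read_pagesizes(void)
{
	size_t ps[2];
	int n;

	/* Number of supported page sizes written to ps[], or -1. */
	n = getpagesizes(ps, 2);
	if (n == -1)
		err(1, "getpagesizes");	/* libthr PANIC()s instead */
}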
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "namespace.h" #include #include #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" -_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE, +_Static_assert(sizeof(struct pthread_mutex) <= THR_PAGE_SIZE_MIN, "pthread_mutex is too large for off-page"); /* * For adaptive mutexes, how many times to spin doing trylock2 * before entering the kernel to block */ #define MUTEX_ADAPTIVE_SPINS 2000 /* * Prototypes */ int __pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex, const struct timespec * __restrict abstime); int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); static int mutex_self_trylock(pthread_mutex_t); static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); static int mutex_unlock_common(struct pthread_mutex *, bool, int *); static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, const struct timespec *); static void mutex_init_robust(struct pthread *curthread); static int mutex_qidx(struct pthread_mutex *m); static bool is_robust_mutex(struct pthread_mutex *m); static bool is_pshared_mutex(struct pthread_mutex *m); __weak_reference(__Tthr_mutex_init, pthread_mutex_init); __weak_reference(__Tthr_mutex_init, __pthread_mutex_init); __strong_reference(__Tthr_mutex_init, _pthread_mutex_init); __weak_reference(__Tthr_mutex_lock, pthread_mutex_lock); __weak_reference(__Tthr_mutex_lock, __pthread_mutex_lock); __strong_reference(__Tthr_mutex_lock, _pthread_mutex_lock); __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); __weak_reference(__Tthr_mutex_trylock, 
pthread_mutex_trylock); __weak_reference(__Tthr_mutex_trylock, __pthread_mutex_trylock); __strong_reference(__Tthr_mutex_trylock, _pthread_mutex_trylock); __weak_reference(_Tthr_mutex_consistent, pthread_mutex_consistent); __weak_reference(_Tthr_mutex_consistent, _pthread_mutex_consistent); __strong_reference(_Tthr_mutex_consistent, __pthread_mutex_consistent); /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ __weak_reference(_thr_mutex_destroy, pthread_mutex_destroy); __weak_reference(_thr_mutex_destroy, _pthread_mutex_destroy); __weak_reference(_thr_mutex_unlock, pthread_mutex_unlock); __weak_reference(_thr_mutex_unlock, _pthread_mutex_unlock); __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); static void mutex_init_link(struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) m->m_qe.tqe_prev = NULL; m->m_qe.tqe_next = NULL; m->m_pqe.tqe_prev = NULL; m->m_pqe.tqe_next = NULL; #endif } static void mutex_assert_is_owned(struct pthread_mutex *m __unused) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev == NULL)) PANIC("mutex %p own %#x is not on list %p %p", m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); #endif } static void mutex_assert_not_owned(struct pthread *curthread __unused, struct pthread_mutex *m __unused) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev != NULL || m->m_qe.tqe_next != NULL)) PANIC("mutex %p own %#x is on list %p %p", m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); if (__predict_false(is_robust_mutex(m) && (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL || (is_pshared_mutex(m) && curthread->robust_list == (uintptr_t)&m->m_lock) || (!is_pshared_mutex(m) && curthread->priv_robust_list == (uintptr_t)&m->m_lock)))) PANIC( "mutex %p own %#x is on robust linkage %p %p head %p phead %p", m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk, m->m_rb_prev, (void *)curthread->robust_list, (void *)curthread->priv_robust_list); #endif } static bool is_pshared_mutex(struct pthread_mutex *m) { return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0); } static bool is_robust_mutex(struct pthread_mutex *m) { return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0); } int _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(curthread->inact_mtx != 0)) PANIC("inact_mtx enter"); #endif if (!is_robust_mutex(m)) return (0); mutex_init_robust(curthread); curthread->inact_mtx = (uintptr_t)&m->m_lock; return (1); } void _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock)) PANIC("inact_mtx leave"); #endif curthread->inact_mtx = 0; } static int mutex_check_attr(const struct 
pthread_mutex_attr *attr) { if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) return (EINVAL); if (attr->m_protocol < PTHREAD_PRIO_NONE || attr->m_protocol > PTHREAD_PRIO_PROTECT) return (EINVAL); return (0); } static void mutex_init_robust(struct pthread *curthread) { struct umtx_robust_lists_params rb; if (curthread == NULL) curthread = _get_curthread(); if (curthread->robust_inited) return; rb.robust_list_offset = (uintptr_t)&curthread->robust_list; rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list; rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx; _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL); curthread->robust_inited = 1; } static void mutex_init_body(struct pthread_mutex *pmutex, const struct pthread_mutex_attr *attr) { pmutex->m_flags = attr->m_type; pmutex->m_count = 0; pmutex->m_spinloops = 0; pmutex->m_yieldloops = 0; mutex_init_link(pmutex); switch (attr->m_protocol) { case PTHREAD_PRIO_NONE: pmutex->m_lock.m_owner = UMUTEX_UNOWNED; pmutex->m_lock.m_flags = 0; break; case PTHREAD_PRIO_INHERIT: pmutex->m_lock.m_owner = UMUTEX_UNOWNED; pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; break; case PTHREAD_PRIO_PROTECT: pmutex->m_lock.m_owner = UMUTEX_CONTESTED; pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; break; } if (attr->m_pshared == PTHREAD_PROCESS_SHARED) pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED; if (attr->m_robust == PTHREAD_MUTEX_ROBUST) { mutex_init_robust(NULL); pmutex->m_lock.m_flags |= UMUTEX_ROBUST; } if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { pmutex->m_spinloops = _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; pmutex->m_yieldloops = _thr_yieldloops; } } static int mutex_init(pthread_mutex_t *mutex, const struct pthread_mutex_attr *mutex_attr, void *(calloc_cb)(size_t, size_t)) { const struct pthread_mutex_attr *attr; struct pthread_mutex *pmutex; int error; if (mutex_attr == NULL) { attr = &_pthread_mutexattr_default; } else { attr = mutex_attr; error = mutex_check_attr(attr); if (error != 0) return (error); } if ((pmutex = (pthread_mutex_t) calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) return (ENOMEM); mutex_init_body(pmutex, attr); *mutex = pmutex; return (0); } static int init_static(struct pthread *thread, pthread_mutex_t *mutex) { int ret; THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); if (*mutex == THR_MUTEX_INITIALIZER) ret = mutex_init(mutex, &_pthread_mutexattr_default, __thr_calloc); else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, __thr_calloc); else ret = 0; THR_LOCK_RELEASE(thread, &_mutex_static_lock); return (ret); } static void set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) { struct pthread_mutex *m2; m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue); if (m2 != NULL) m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; else m->m_lock.m_ceilings[1] = -1; } static void shared_mutex_init(struct pthread_mutex *pmtx, const struct pthread_mutex_attr *mutex_attr) { static const struct pthread_mutex_attr foobar_mutex_attr = { .m_type = PTHREAD_MUTEX_DEFAULT, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_SHARED, .m_robust = PTHREAD_MUTEX_STALLED, }; bool done; /* * Hack to allow multiple pthread_mutex_init() calls on the * same process-shared mutex. We rely on kernel allocating * zeroed offpage for the mutex, i.e. the * PMUTEX_INITSTAGE_ALLOC value must be zero. 
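shared_mutex_init() below relies on the off-page arriving zeroed, so PMUTEX_INITSTAGE_ALLOC must be the zero value and initialization races resolve through an ALLOC -> BUSY -> DONE state machine. The same one-shot pattern in portable C11 form (an illustrative sketch):

#include <sched.h>
#include <stdatomic.h>

enum { ALLOC = 0, BUSY, DONE };		/* ALLOC must be the zero value */

static void
example_init_once(_Atomic int *stage, void (*initfn)(void))
{
	int s;

	for (;;) {
		s = atomic_load_explicit(stage, memory_order_acquire);
		if (s == DONE)
			return;
		if (s == ALLOC) {
			int expected = ALLOC;
			if (atomic_compare_exchange_strong(stage,
			    &expected, BUSY)) {
				initfn();
				atomic_store_explicit(stage, DONE,
				    memory_order_release);
				return;
			}
		}
		sched_yield();		/* some other thread is in BUSY */
	}
}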
*/ for (done = false; !done;) { switch (pmtx->m_ps) { case PMUTEX_INITSTAGE_DONE: atomic_thread_fence_acq(); done = true; break; case PMUTEX_INITSTAGE_ALLOC: if (atomic_cmpset_int(&pmtx->m_ps, PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) { if (mutex_attr == NULL) mutex_attr = &foobar_mutex_attr; mutex_init_body(pmtx, mutex_attr); atomic_store_rel_int(&pmtx->m_ps, PMUTEX_INITSTAGE_DONE); done = true; } break; case PMUTEX_INITSTAGE_BUSY: _pthread_yield(); break; default: PANIC("corrupted offpage"); break; } } } int __Tthr_mutex_init(pthread_mutex_t * __restrict mutex, const pthread_mutexattr_t * __restrict mutex_attr) { struct pthread_mutex *pmtx; int ret; _thr_check_init(); if (mutex_attr != NULL) { ret = mutex_check_attr(*mutex_attr); if (ret != 0) return (ret); } if (mutex_attr == NULL || (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) { __thr_malloc_init(); return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, __thr_calloc)); } pmtx = __thr_pshared_offpage(__DECONST(void *, mutex), 1); if (pmtx == NULL) return (EFAULT); *mutex = THR_PSHARED_PTR; shared_mutex_init(pmtx, *mutex_attr); return (0); } /* This function is used internally by malloc. */ int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)) { static const struct pthread_mutex_attr attr = { .m_type = PTHREAD_MUTEX_NORMAL, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_PRIVATE, .m_robust = PTHREAD_MUTEX_STALLED, }; int ret; ret = mutex_init(mutex, &attr, calloc_cb); if (ret == 0) (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE; return (ret); } /* * Fix mutex ownership for child process. * * Process private mutex ownership is transmitted from the forking * thread to the child process. * * Process shared mutex should not be inherited because owner is * forking thread which is in parent process, they are removed from * the owned mutex list. */ static void queue_fork(struct pthread *curthread, struct mutex_queue *q, struct mutex_queue *qp, uint bit) { struct pthread_mutex *m; TAILQ_INIT(q); TAILQ_FOREACH(m, qp, m_pqe) { TAILQ_INSERT_TAIL(q, m, m_qe); m->m_lock.m_owner = TID(curthread) | bit; } } void _mutex_fork(struct pthread *curthread) { queue_fork(curthread, &curthread->mq[TMQ_NORM], &curthread->mq[TMQ_NORM_PRIV], 0); queue_fork(curthread, &curthread->mq[TMQ_NORM_PP], &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED); queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP], &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED); curthread->robust_list = 0; } int _thr_mutex_destroy(pthread_mutex_t *mutex) { pthread_mutex_t m, m1; int ret; m = *mutex; if (m < THR_MUTEX_DESTROYED) { ret = 0; } else if (m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { if (m == THR_PSHARED_PTR) { m1 = __thr_pshared_offpage(mutex, 0); if (m1 != NULL) { if ((uint32_t)m1->m_lock.m_owner != UMUTEX_RB_OWNERDEAD) { mutex_assert_not_owned( _get_curthread(), m1); } __thr_pshared_destroy(mutex); } *mutex = THR_MUTEX_DESTROYED; return (0); } if (PMUTEX_OWNER_ID(m) != 0 && (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) { ret = EBUSY; } else { *mutex = THR_MUTEX_DESTROYED; mutex_assert_not_owned(_get_curthread(), m); __thr_free(m); ret = 0; } } return (ret); } static int mutex_qidx(struct pthread_mutex *m) { if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (TMQ_NORM); return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP); } /* * Both enqueue_mutex() and dequeue_mutex() operate on the * thread-private linkage of the locked mutexes and on the robust * linkage. 
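The robust linkage that the following comment and enqueue_mutex()/dequeue_mutex() describe has roughly this shape (my sketch of the structure, per the comments; the kernel walks only the forward pointers):

/*
 * curthread->robust_list --> m1.m_lock --> m2.m_lock --> 0
 *        (m_rb_lnk forward pointers: traversed by the kernel at thread
 *         exit to mark still-owned robust mutexes owner-dead)
 *
 * m2.m_rb_prev --> m1
 *        (userspace-only back pointers, so a dequeue only has to
 *         rewrite a single forward pointer as seen by the kernel)
 */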
* * Robust list, as seen by kernel, must be consistent even in the case * of thread termination at arbitrary moment. Since either enqueue or * dequeue for list walked by kernel consists of rewriting a single * forward pointer, it is safe. On the other hand, rewrite of the * back pointer is not atomic WRT the forward one, but kernel does not * care. */ static void enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m, int error) { struct pthread_mutex *m1; uintptr_t *rl; int qidx; /* Add to the list of owned mutexes: */ if (error != EOWNERDEAD) mutex_assert_not_owned(curthread, m); qidx = mutex_qidx(m); TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); if (is_robust_mutex(m)) { rl = is_pshared_mutex(m) ? &curthread->robust_list : &curthread->priv_robust_list; m->m_rb_prev = NULL; if (*rl != 0) { m1 = __containerof((void *)*rl, struct pthread_mutex, m_lock); m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock; m1->m_rb_prev = m; } else { m1 = NULL; m->m_lock.m_rb_lnk = 0; } *rl = (uintptr_t)&m->m_lock; } } static void dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) { struct pthread_mutex *mp, *mn; int qidx; mutex_assert_is_owned(m); qidx = mutex_qidx(m); if (is_robust_mutex(m)) { mp = m->m_rb_prev; if (mp == NULL) { if (is_pshared_mutex(m)) { curthread->robust_list = m->m_lock.m_rb_lnk; } else { curthread->priv_robust_list = m->m_lock.m_rb_lnk; } } else { mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk; } if (m->m_lock.m_rb_lnk != 0) { mn = __containerof((void *)m->m_lock.m_rb_lnk, struct pthread_mutex, m_lock); mn->m_rb_prev = m->m_rb_prev; } m->m_lock.m_rb_lnk = 0; m->m_rb_prev = NULL; } TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) set_inherited_priority(curthread, m); mutex_init_link(m); } static int check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m) { int ret; *m = *mutex; ret = 0; if (*m == THR_PSHARED_PTR) { *m = __thr_pshared_offpage(mutex, 0); if (*m == NULL) ret = EINVAL; else shared_mutex_init(*m, NULL); } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) { if (*m == THR_MUTEX_DESTROYED) { ret = EINVAL; } else { ret = init_static(_get_curthread(), mutex); if (ret == 0) *m = *mutex; } } return (ret); } int __Tthr_mutex_trylock(pthread_mutex_t *mutex) { struct pthread *curthread; struct pthread_mutex *m; uint32_t id; int ret, robust; ret = check_and_init_mutex(mutex, &m); if (ret != 0) return (ret); curthread = _get_curthread(); id = TID(curthread); if (m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); robust = _mutex_enter_robust(curthread, m); ret = _thr_umutex_trylock(&m->m_lock, id); if (__predict_true(ret == 0) || ret == EOWNERDEAD) { enqueue_mutex(curthread, m, ret); if (ret == EOWNERDEAD) m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; } else if (PMUTEX_OWNER_ID(m) == id) { ret = mutex_self_trylock(m); } /* else {} */ if (robust) _mutex_leave_robust(curthread, m); if (ret != 0 && ret != EOWNERDEAD && (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0) THR_CRITICAL_LEAVE(curthread); return (ret); } static int mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, const struct timespec *abstime) { uint32_t id, owner; int count, ret; id = TID(curthread); if (PMUTEX_OWNER_ID(m) == id) return (mutex_self_lock(m, abstime)); /* * For adaptive mutexes, spin for a bit in the expectation * that if the application requests this mutex 
type then * the lock is likely to be released quickly and it is * faster than entering the kernel */ if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0)) goto sleep_in_kernel; if (!_thr_is_smp) goto yield_loop; count = m->m_spinloops; while (count--) { owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id | owner)) { ret = 0; goto done; } } CPU_SPINWAIT; } yield_loop: count = m->m_yieldloops; while (count--) { _sched_yield(); owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id | owner)) { ret = 0; goto done; } } } sleep_in_kernel: if (abstime == NULL) ret = __thr_umutex_lock(&m->m_lock, id); else if (__predict_false(abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)) ret = EINVAL; else ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); done: if (ret == 0 || ret == EOWNERDEAD) { enqueue_mutex(curthread, m, ret); if (ret == EOWNERDEAD) m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; } return (ret); } static inline int mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, bool cvattach, bool rb_onlist) { struct pthread *curthread; int ret, robust; robust = 0; /* pacify gcc */ curthread = _get_curthread(); if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); if (!rb_onlist) robust = _mutex_enter_robust(curthread, m); ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread)); if (ret == 0 || ret == EOWNERDEAD) { enqueue_mutex(curthread, m, ret); if (ret == EOWNERDEAD) m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; } else { ret = mutex_lock_sleep(curthread, m, abstime); } if (!rb_onlist && robust) _mutex_leave_robust(curthread, m); if (ret != 0 && ret != EOWNERDEAD && (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach) THR_CRITICAL_LEAVE(curthread); return (ret); } int __Tthr_mutex_lock(pthread_mutex_t *mutex) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, NULL, false, false); return (ret); } int __pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex, const struct timespec * __restrict abstime) { struct pthread_mutex *m; int ret; _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) ret = mutex_lock_common(m, abstime, false, false); return (ret); } int _thr_mutex_unlock(pthread_mutex_t *mutex) { struct pthread_mutex *mp; if (*mutex == THR_PSHARED_PTR) { mp = __thr_pshared_offpage(mutex, 0); if (mp == NULL) return (EINVAL); shared_mutex_init(mp, NULL); } else { mp = *mutex; } return (mutex_unlock_common(mp, false, NULL)); } int _mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist) { int error; error = mutex_lock_common(m, NULL, true, rb_onlist); if (error == 0 || error == EOWNERDEAD) m->m_count = count; return (error); } int _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) { /* * Clear the count in case this is a recursive mutex. 
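The spin-then-yield ladder above only engages for PTHREAD_MUTEX_ADAPTIVE_NP mutexes, whose m_spinloops/m_yieldloops are set in mutex_init_body() from the _thr_spinloops/_thr_yieldloops tunables or the MUTEX_ADAPTIVE_SPINS default. Opting in from the application, a sketch:

#include <pthread.h>

static pthread_mutex_t am;

static int
example_adaptive_init(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	/* Spin briefly before sleeping in the kernel under contention. */
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	return (pthread_mutex_init(&am, &attr));
}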
*/ *count = m->m_count; m->m_count = 0; (void)mutex_unlock_common(m, true, defer); return (0); } int _mutex_cv_attach(struct pthread_mutex *m, int count) { struct pthread *curthread; curthread = _get_curthread(); enqueue_mutex(curthread, m, 0); m->m_count = count; return (0); } int _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) { struct pthread *curthread; int deferred, error; curthread = _get_curthread(); if ((error = _mutex_owned(curthread, mp)) != 0) return (error); /* * Clear the count in case this is a recursive mutex. */ *recurse = mp->m_count; mp->m_count = 0; dequeue_mutex(curthread, mp); /* Can this happen in the real world? */ if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { deferred = 1; mp->m_flags &= ~PMUTEX_FLAG_DEFERRED; } else deferred = 0; if (deferred) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } return (0); } static int mutex_self_trylock(struct pthread_mutex *m) { int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: case PTHREAD_MUTEX_ADAPTIVE_NP: ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types. */ ret = EINVAL; } return (ret); } static int mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) { struct timespec ts1, ts2; int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_ADAPTIVE_NP: if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { /* * POSIX specifies that mutexes should return * EDEADLK if a recursive lock is detected. */ ret = EDEADLK; } break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 defines as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ ret = 0; if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { ret = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); ret = ETIMEDOUT; } } else { ts1.tv_sec = 30; ts1.tv_nsec = 0; for (;;) __sys_nanosleep(&ts1, NULL); } break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ if (m->m_count + 1 > 0) { m->m_count++; ret = 0; } else ret = EAGAIN; break; default: /* Trap invalid mutex types. */ ret = EINVAL; } return (ret); } static int mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer) { struct pthread *curthread; uint32_t id; int deferred, error, private, robust; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { if (m == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } curthread = _get_curthread(); id = TID(curthread); /* * Check if the running thread is not the owner of the mutex. 
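
mutex_self_trylock() and mutex_self_lock() above encode the per-type relock rules: error-checking (and adaptive) mutexes fail the relock, normal mutexes deliberately block forever, and recursive mutexes bump m_count. A standalone illustration of the two observable cases (example code, not from the patch):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Lock a mutex of the given type twice from one thread and report
     * what the second lock returned. Error handling elided. */
    static void
    relock_demo(int type, const char *name)
    {
            pthread_mutexattr_t attr;
            pthread_mutex_t m;
            int error;

            pthread_mutexattr_init(&attr);
            pthread_mutexattr_settype(&attr, type);
            pthread_mutex_init(&m, &attr);
            pthread_mutexattr_destroy(&attr);

            pthread_mutex_lock(&m);
            error = pthread_mutex_lock(&m);         /* self-lock */
            printf("%s: second lock returned %d\n", name, error);
            if (error == 0)
                    pthread_mutex_unlock(&m);       /* drop recursion */
            pthread_mutex_unlock(&m);
            pthread_mutex_destroy(&m);
    }

    int
    main(void)
    {
            relock_demo(PTHREAD_MUTEX_ERRORCHECK, "errorcheck"); /* EDEADLK */
            relock_demo(PTHREAD_MUTEX_RECURSIVE, "recursive");   /* 0 */
            return (0);
    }
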
*/ if (__predict_false(PMUTEX_OWNER_ID(m) != id)) return (EPERM); error = 0; private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0; if (__predict_false(PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { m->m_count--; } else { if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { deferred = 1; m->m_flags &= ~PMUTEX_FLAG_DEFERRED; } else deferred = 0; robust = _mutex_enter_robust(curthread, m); dequeue_mutex(curthread, m); error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); if (deferred) { if (mtx_defer == NULL) { _thr_wake_all(curthread->defer_waiters, curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } else *mtx_defer = 1; } if (robust) _mutex_leave_robust(curthread, m); } if (!cv && private) THR_CRITICAL_LEAVE(curthread); return (error); } int _pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex, int * __restrict prioceiling) { struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(__DECONST(void *, mutex), 0); if (m == NULL) return (EINVAL); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); *prioceiling = m->m_lock.m_ceilings[0]; return (0); } int _pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex, int ceiling, int * __restrict old_ceiling) { struct pthread *curthread; struct pthread_mutex *m, *m1, *m2; struct mutex_queue *q, *qp; int qidx, ret; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EINVAL); ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); if (ret != 0) return (ret); curthread = _get_curthread(); if (PMUTEX_OWNER_ID(m) == TID(curthread)) { mutex_assert_is_owned(m); m1 = TAILQ_PREV(m, mutex_queue, m_qe); m2 = TAILQ_NEXT(m, m_qe); if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { qidx = mutex_qidx(m); q = &curthread->mq[qidx]; qp = &curthread->mq[qidx + 1]; TAILQ_REMOVE(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(qp, m, m_pqe); TAILQ_FOREACH(m2, q, m_qe) { if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { TAILQ_INSERT_BEFORE(m2, m, m_qe); if (!is_pshared_mutex(m)) { while (m2 != NULL && is_pshared_mutex(m2)) { m2 = TAILQ_PREV(m2, mutex_queue, m_qe); } if (m2 == NULL) { TAILQ_INSERT_HEAD(qp, m, m_pqe); } else { TAILQ_INSERT_BEFORE(m2, m, m_pqe); } } return (0); } } TAILQ_INSERT_TAIL(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(qp, m, m_pqe); } } return (0); } int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_spinloops; return (ret); } int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_spinloops = count; return (ret); } int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) *count = m->m_yieldloops; return (ret); } int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) { struct pthread_mutex *m; int ret; ret = check_and_init_mutex(mutex, &m); if (ret == 0) m->m_yieldloops = count; return (0); } int 
_pthread_mutex_isowned_np(pthread_mutex_t *mutex) { struct pthread_mutex *m; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (0); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (0); } return (PMUTEX_OWNER_ID(m) == TID(_get_curthread())); } int _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) { if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { if (mp == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } if (PMUTEX_OWNER_ID(mp) != TID(curthread)) return (EPERM); return (0); } int _Tthr_mutex_consistent(pthread_mutex_t *mutex) { struct pthread_mutex *m; struct pthread *curthread; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); if (m == NULL) return (EINVAL); shared_mutex_init(m, NULL); } else { m = *mutex; if (m <= THR_MUTEX_DESTROYED) return (EINVAL); } curthread = _get_curthread(); if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) return (EINVAL); if (PMUTEX_OWNER_ID(m) != TID(curthread)) return (EPERM); m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT; return (0); } diff --git a/lib/libthr/thread/thr_pshared.c b/lib/libthr/thread/thr_pshared.c index 83714785f9b1..d0219d0488f4 100644 --- a/lib/libthr/thread/thr_pshared.c +++ b/lib/libthr/thread/thr_pshared.c @@ -1,268 +1,273 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
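
_Tthr_mutex_consistent() above clears UMUTEX_NONCONSISTENT after a lock operation has returned EOWNERDEAD, which is the libthr half of the robust-mutex recovery protocol. A hedged sketch of the caller side, using the standard pthread_mutexattr_setrobust(3) and pthread_mutex_consistent(3) interfaces; the repair step is application-specific and stubbed out:

    #include <errno.h>
    #include <pthread.h>

    /* Initialize *m as a robust mutex. Error handling abbreviated. */
    static int
    robust_init(pthread_mutex_t *m)
    {
            pthread_mutexattr_t attr;
            int error;

            pthread_mutexattr_init(&attr);
            error = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
            if (error == 0)
                    error = pthread_mutex_init(m, &attr);
            pthread_mutexattr_destroy(&attr);
            return (error);
    }

    /* Lock *m, recovering if the previous owner died holding it. */
    static int
    robust_lock(pthread_mutex_t *m)
    {
            int error;

            error = pthread_mutex_lock(m);
            if (error == EOWNERDEAD) {
                    /* Application-specific repair of the protected
                     * state would go here. */
                    error = pthread_mutex_consistent(m);
            }
            return (error);
    }
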
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/types.h> #include <sys/mman.h> #include <sys/queue.h> #include "namespace.h" #include <stdlib.h> #include "un-namespace.h" #include "thr_private.h" struct psh { LIST_ENTRY(psh) link; void *key; void *val; }; LIST_HEAD(pshared_hash_head, psh); #define HASH_SIZE 128 static struct pshared_hash_head pshared_hash[HASH_SIZE]; #define PSHARED_KEY_HASH(key) (((unsigned long)(key) >> 8) % HASH_SIZE) /* XXXKIB: the lock could be split into per-hash-chain locks if it appears contested */ static struct urwlock pshared_lock = DEFAULT_URWLOCK; +static int page_size; void __thr_pshared_init(void) { int i; + page_size = getpagesize(); + THR_ASSERT(page_size >= THR_PAGE_SIZE_MIN, + "THR_PAGE_SIZE_MIN is too large"); + _thr_urwlock_init(&pshared_lock); for (i = 0; i < HASH_SIZE; i++) LIST_INIT(&pshared_hash[i]); } static void pshared_rlock(struct pthread *curthread) { curthread->locklevel++; _thr_rwl_rdlock(&pshared_lock); } static void pshared_wlock(struct pthread *curthread) { curthread->locklevel++; _thr_rwl_wrlock(&pshared_lock); } static void pshared_unlock(struct pthread *curthread) { _thr_rwl_unlock(&pshared_lock); curthread->locklevel--; _thr_ast(curthread); } /* * Among all processes sharing a lock, only one executes * pthread_lock_destroy(). Other processes still have the hash and * mapped off-page. * * Mitigate the problem by checking the liveness of all hashed keys * periodically. Right now this is executed on each * pthread_lock_destroy(), but may be done less often if found to be * too time-consuming. */ static void pshared_gc(struct pthread *curthread) { struct pshared_hash_head *hd; struct psh *h, *h1; int error, i; pshared_wlock(curthread); for (i = 0; i < HASH_SIZE; i++) { hd = &pshared_hash[i]; LIST_FOREACH_SAFE(h, hd, link, h1) { error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE, h->val, NULL); if (error == 0) continue; LIST_REMOVE(h, link); - munmap(h->val, PAGE_SIZE); + munmap(h->val, page_size); free(h); } } pshared_unlock(curthread); } static void * pshared_lookup(void *key) { struct pshared_hash_head *hd; struct psh *h; hd = &pshared_hash[PSHARED_KEY_HASH(key)]; LIST_FOREACH(h, hd, link) { if (h->key == key) return (h->val); } return (NULL); } static int pshared_insert(void *key, void **val) { struct pshared_hash_head *hd; struct psh *h; hd = &pshared_hash[PSHARED_KEY_HASH(key)]; LIST_FOREACH(h, hd, link) { /* * When the key already exists in the hash, we should * return either the new (just mapped) or old (hashed) * val, and the other val should be unmapped to avoid * an address space leak. * * If two threads lock the same object which is not yet * stored in the pshared_hash, then the val already * inserted by the first thread should be returned, and * the second val freed (ordering is provided by * pshared_lock()). Otherwise, if we unmap the value * obtained from the hash, the first thread might * operate on an unmapped off-page object. * * There is still an issue: if the hashed key was * unmapped and another page is then mapped at the same * key address, the hash would return the old val. I * decided to handle the race of simultaneous hash * insertion, leaving the unlikely remap problem * unaddressed. 
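
The hash above is only consulted for PTHREAD_PROCESS_SHARED objects: the user-visible lock word holds THR_PSHARED_PTR and the real lock lives in a kernel-backed page mapped once per process, keyed by the lock's address. A sketch of the kind of usage that reaches __thr_pshared_offpage() (illustrative only; assumes MAP_ANON shared memory and omits error handling):

    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <pthread.h>
    #include <unistd.h>

    int
    main(void)
    {
            pthread_mutexattr_t attr;
            pthread_mutex_t *m;

            /* The pthread_mutex_t itself must be visible to both
             * processes; libthr keeps the actual umutex off-page. */
            m = mmap(NULL, sizeof(*m), PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_ANON, -1, 0);

            pthread_mutexattr_init(&attr);
            pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
            pthread_mutex_init(m, &attr);
            pthread_mutexattr_destroy(&attr);

            if (fork() == 0) {
                    /* Child: locking re-resolves the off-page object. */
                    pthread_mutex_lock(m);
                    pthread_mutex_unlock(m);
                    _exit(0);
            }
            pthread_mutex_lock(m);
            pthread_mutex_unlock(m);
            wait(NULL);
            return (0);
    }
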
*/ if (h->key == key) { if (h->val != *val) { - munmap(*val, PAGE_SIZE); + munmap(*val, page_size); *val = h->val; } return (1); } } h = malloc(sizeof(*h)); if (h == NULL) return (0); h->key = key; h->val = *val; LIST_INSERT_HEAD(hd, h, link); return (1); } static void * pshared_remove(void *key) { struct pshared_hash_head *hd; struct psh *h; void *val; hd = &pshared_hash[PSHARED_KEY_HASH(key)]; LIST_FOREACH(h, hd, link) { if (h->key == key) { LIST_REMOVE(h, link); val = h->val; free(h); return (val); } } return (NULL); } static void pshared_clean(void *key, void *val) { if (val != NULL) - munmap(val, PAGE_SIZE); + munmap(val, page_size); _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL); } void * __thr_pshared_offpage(void *key, int doalloc) { struct pthread *curthread; void *res; int fd, ins_done; curthread = _get_curthread(); pshared_rlock(curthread); res = pshared_lookup(key); pshared_unlock(curthread); if (res != NULL) return (res); fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT : UMTX_SHM_LOOKUP, key, NULL); if (fd == -1) return (NULL); - res = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + res = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (res == MAP_FAILED) return (NULL); pshared_wlock(curthread); ins_done = pshared_insert(key, &res); pshared_unlock(curthread); if (!ins_done) { pshared_clean(key, res); res = NULL; } return (res); } void __thr_pshared_destroy(void *key) { struct pthread *curthread; void *val; curthread = _get_curthread(); pshared_wlock(curthread); val = pshared_remove(key); pshared_unlock(curthread); pshared_clean(key, val); pshared_gc(curthread); } void __thr_pshared_atfork_pre(void) { _thr_rwl_rdlock(&pshared_lock); } void __thr_pshared_atfork_post(void) { _thr_rwl_unlock(&pshared_lock); } diff --git a/lib/libthr/thread/thr_pspinlock.c b/lib/libthr/thread/thr_pspinlock.c index c71bdbb3f196..d1a53446f654 100644 --- a/lib/libthr/thread/thr_pspinlock.c +++ b/lib/libthr/thread/thr_pspinlock.c @@ -1,155 +1,155 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 David Xu * Copyright (c) 2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
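
__thr_pshared_atfork_pre() and __thr_pshared_atfork_post() above follow the usual fork-safety discipline: acquire the subsystem lock before fork() so the child inherits a consistent hash, then release it on both sides. Ordinary libraries can apply the same pattern through pthread_atfork(3); a generic sketch, unrelated to libthr internals:

    #include <pthread.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lock_cache(void)   { pthread_mutex_lock(&cache_lock); }
    static void unlock_cache(void) { pthread_mutex_unlock(&cache_lock); }

    /* Call once, e.g. from a library initializer: hold the lock across
     * fork() so the child never sees half-updated cache state. */
    static int
    install_fork_handlers(void)
    {
            return (pthread_atfork(lock_cache, unlock_cache, unlock_cache));
    }
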
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "namespace.h" #include #include #include #include "un-namespace.h" #include "thr_private.h" -_Static_assert(sizeof(struct pthread_spinlock) <= PAGE_SIZE, +_Static_assert(sizeof(struct pthread_spinlock) <= THR_PAGE_SIZE_MIN, "pthread_spinlock is too large for off-page"); #define SPIN_COUNT 100000 __weak_reference(_pthread_spin_init, pthread_spin_init); __weak_reference(_pthread_spin_destroy, pthread_spin_destroy); __weak_reference(_pthread_spin_trylock, pthread_spin_trylock); __weak_reference(_pthread_spin_lock, pthread_spin_lock); __weak_reference(_pthread_spin_unlock, pthread_spin_unlock); int _pthread_spin_init(pthread_spinlock_t *lock, int pshared) { struct pthread_spinlock *lck; if (lock == NULL) return (EINVAL); if (pshared == PTHREAD_PROCESS_PRIVATE) { lck = malloc(sizeof(struct pthread_spinlock)); if (lck == NULL) return (ENOMEM); *lock = lck; } else if (pshared == PTHREAD_PROCESS_SHARED) { lck = __thr_pshared_offpage(lock, 1); if (lck == NULL) return (EFAULT); *lock = THR_PSHARED_PTR; } else { return (EINVAL); } _thr_umutex_init(&lck->s_lock); return (0); } int _pthread_spin_destroy(pthread_spinlock_t *lock) { void *l; int ret; if (lock == NULL || *lock == NULL) { ret = EINVAL; } else if (*lock == THR_PSHARED_PTR) { l = __thr_pshared_offpage(lock, 0); if (l != NULL) __thr_pshared_destroy(l); ret = 0; } else { free(*lock); *lock = NULL; ret = 0; } return (ret); } int _pthread_spin_trylock(pthread_spinlock_t *lock) { struct pthread_spinlock *lck; if (lock == NULL || *lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); return (THR_UMUTEX_TRYLOCK(_get_curthread(), &lck->s_lock)); } int _pthread_spin_lock(pthread_spinlock_t *lock) { struct pthread *curthread; struct pthread_spinlock *lck; int count; if (lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); curthread = _get_curthread(); count = SPIN_COUNT; while (THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock) != 0) { while (lck->s_lock.m_owner) { if (!_thr_is_smp) { _pthread_yield(); } else { CPU_SPINWAIT; if (--count <= 0) { count = SPIN_COUNT; _pthread_yield(); } } } } return (0); } int _pthread_spin_unlock(pthread_spinlock_t *lock) { struct pthread_spinlock *lck; if (lock == NULL) return (EINVAL); lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock; if (lck == NULL) return (EINVAL); return (THR_UMUTEX_UNLOCK(_get_curthread(), &lck->s_lock)); } diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c index 22c6c878b2a5..1d110a7bf285 100644 --- a/lib/libthr/thread/thr_rwlock.c +++ b/lib/libthr/thread/thr_rwlock.c @@ -1,384 +1,384 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 Alex Nash * All rights reserved. 
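
The _Static_assert updated above (and its twin in thr_rwlock.c below) is the compile-time half of the new page-size handling: __thr_pshared_offpage() now maps page_size bytes at run time, so every off-page object must fit in the smallest page size the platform might report, which THR_PAGE_SIZE_MIN bounds from below and the THR_ASSERT in __thr_pshared_init() re-checks at startup. A self-contained illustration of the idiom, with a hypothetical struct and an assumed 4K minimum:

    /* EXAMPLE_PAGE_SIZE_MIN and struct offpage_object are stand-ins,
     * not libthr names; 4096 mirrors a common minimum page size. */
    #define EXAMPLE_PAGE_SIZE_MIN   4096

    struct offpage_object {
            long    state[8];
    };

    /* Fail the build, rather than corrupting memory at run time, if
     * the object ever outgrows the minimum page. */
    _Static_assert(sizeof(struct offpage_object) <= EXAMPLE_PAGE_SIZE_MIN,
        "offpage_object is too large for off-page");
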
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "namespace.h" #include #include "un-namespace.h" #include "thr_private.h" -_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE, +_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN, "pthread_rwlock is too large for off-page"); __weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy); __weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy); __weak_reference(_thr_rwlock_init, pthread_rwlock_init); __weak_reference(_thr_rwlock_init, _pthread_rwlock_init); __weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock); __weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock); __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock); __weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock); __weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock); __weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock); __weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock); __weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock); __weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock); __weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock); __weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock); __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock); static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out); static int __always_inline check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out) { if (__predict_false(*rwlock == THR_PSHARED_PTR || *rwlock <= THR_RWLOCK_DESTROYED)) return (init_rwlock(rwlock, rwlock_out)); *rwlock_out = *rwlock; return (0); } static int __noinline init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out) { pthread_rwlock_t prwlock; int ret; if (*rwlock == THR_PSHARED_PTR) { prwlock = __thr_pshared_offpage(rwlock, 0); if (prwlock == NULL) return (EINVAL); } else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) { if (prwlock == THR_RWLOCK_INITIALIZER) { ret = init_static(_get_curthread(), rwlock); if (ret != 0) return (ret); } else if (prwlock == THR_RWLOCK_DESTROYED) { return (EINVAL); } prwlock = *rwlock; } *rwlock_out = 
prwlock; return (0); } static int rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { pthread_rwlock_t prwlock; if (attr == NULL || *attr == NULL || (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) { prwlock = calloc(1, sizeof(struct pthread_rwlock)); if (prwlock == NULL) return (ENOMEM); *rwlock = prwlock; } else { prwlock = __thr_pshared_offpage(rwlock, 1); if (prwlock == NULL) return (EFAULT); prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED; *rwlock = THR_PSHARED_PTR; } return (0); } int _thr_rwlock_destroy(pthread_rwlock_t *rwlock) { pthread_rwlock_t prwlock; int ret; prwlock = *rwlock; if (prwlock == THR_RWLOCK_INITIALIZER) ret = 0; else if (prwlock == THR_RWLOCK_DESTROYED) ret = EINVAL; else if (prwlock == THR_PSHARED_PTR) { *rwlock = THR_RWLOCK_DESTROYED; __thr_pshared_destroy(rwlock); ret = 0; } else { *rwlock = THR_RWLOCK_DESTROYED; free(prwlock); ret = 0; } return (ret); } static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock) { int ret; THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); if (*rwlock == THR_RWLOCK_INITIALIZER) ret = rwlock_init(rwlock, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_rwlock_static_lock); return (ret); } int _thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { *rwlock = NULL; return (rwlock_init(rwlock, attr)); } static int rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int flags; int ret; ret = check_and_init_rwlock(rwlock, &prwlock); if (ret != 0) return (ret); if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by * a thread or all of the threads that hold a rdlock, * we keep a simple count of all the rdlocks held by * a thread. If a thread holds any rdlocks it is * possible that it is attempting to take a recursive * rdlock. If there are blocked writers and precedence * is given to them, then that would result in the thread * deadlocking. So allowing a thread to take the rdlock * when it already has one or more rdlocks avoids the * deadlock. I hope the reader can follow that logic ;-) */ flags = URWLOCK_PREFER_READER; } else { flags = 0; } /* * POSIX said the validity of the abstimeout parameter need * not be checked if the lock can be immediately acquired. */ ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); if (ret == 0) { curthread->rdlock_count++; return (ret); } if (__predict_false(abstime && (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) return (EINVAL); for (;;) { /* goto kernel and lock it */ ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime); if (ret != EINTR) break; /* if interrupted, try to lock it in userland again. 
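
The rdlock_count bookkeeping above is what keeps recursive read locking deadlock-free: once a thread holds any read lock, further acquisitions pass URWLOCK_PREFER_READER so a queued writer cannot block the reacquisition. An illustrative caller that depends on this (example code, error handling elided):

    #include <pthread.h>

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

    static int
    lookup_one(void)
    {
            int v;

            pthread_rwlock_rdlock(&map_lock);
            v = 0;          /* ... read shared state ... */
            pthread_rwlock_unlock(&map_lock);
            return (v);
    }

    /* Takes the read lock and then calls lookup_one(), which takes it
     * again: a recursive rdlock on the same thread. */
    static int
    lookup_two(void)
    {
            int v;

            pthread_rwlock_rdlock(&map_lock);
            v = lookup_one();
            pthread_rwlock_unlock(&map_lock);
            return (v);
    }
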
*/ if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) { ret = 0; break; } } if (ret == 0) curthread->rdlock_count++; return (ret); } int _Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock) { return (rwlock_rdlock_common(rwlock, NULL)); } int _pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock, const struct timespec * __restrict abstime) { return (rwlock_rdlock_common(rwlock, abstime)); } int _Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int flags; int ret; ret = check_and_init_rwlock(rwlock, &prwlock); if (ret != 0) return (ret); if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by * a thread or all of the threads that hold a rdlock, * we keep a simple count of all the rdlocks held by * a thread. If a thread holds any rdlocks it is * possible that it is attempting to take a recursive * rdlock. If there are blocked writers and precedence * is given to them, then that would result in the thread * deadlocking. So allowing a thread to take the rdlock * when it already has one or more rdlocks avoids the * deadlock. I hope the reader can follow that logic ;-) */ flags = URWLOCK_PREFER_READER; } else { flags = 0; } ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); if (ret == 0) curthread->rdlock_count++; return (ret); } int _Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; ret = check_and_init_rwlock(rwlock, &prwlock); if (ret != 0) return (ret); ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) prwlock->owner = TID(curthread); return (ret); } static int rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; ret = check_and_init_rwlock(rwlock, &prwlock); if (ret != 0) return (ret); /* * POSIX said the validity of the abstimeout parameter need * not be checked if the lock can be immediately acquired. */ ret = _thr_rwlock_trywrlock(&prwlock->lock); if (ret == 0) { prwlock->owner = TID(curthread); return (ret); } if (__predict_false(abstime && (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0))) return (EINVAL); for (;;) { /* goto kernel and lock it */ ret = __thr_rwlock_wrlock(&prwlock->lock, abstime); if (ret == 0) { prwlock->owner = TID(curthread); break; } if (ret != EINTR) break; /* if interrupted, try to lock it in userland again. */ if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) { ret = 0; prwlock->owner = TID(curthread); break; } } return (ret); } int _Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock) { return (rwlock_wrlock_common (rwlock, NULL)); } int _pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock, const struct timespec * __restrict abstime) { return (rwlock_wrlock_common (rwlock, abstime)); } int _Tthr_rwlock_unlock(pthread_rwlock_t *rwlock) { struct pthread *curthread = _get_curthread(); pthread_rwlock_t prwlock; int ret; int32_t state; if (*rwlock == THR_PSHARED_PTR) { prwlock = __thr_pshared_offpage(rwlock, 0); if (prwlock == NULL) return (EINVAL); } else { prwlock = *rwlock; } if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED)) return (EINVAL); state = prwlock->lock.rw_state; if (state & URWLOCK_WRITE_OWNER) { if (__predict_false(prwlock->owner != TID(curthread))) return (EPERM); prwlock->owner = 0; } ret = _thr_rwlock_unlock(&prwlock->lock); if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0) curthread->rdlock_count--; return (ret); }
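
Both timed rwlock entry points above take an absolute CLOCK_REALTIME deadline and, as the comments note, validate tv_nsec only once the uncontested fast path has failed. A sketch of the caller side, deriving the absolute timespec from a relative timeout; wrlock_ms() is a hypothetical helper, not a libthr interface:

    #include <pthread.h>
    #include <time.h>

    /* Write-lock rw, giving up after rel_ms milliseconds; returns 0,
     * ETIMEDOUT, or another pthread error number. */
    static int
    wrlock_ms(pthread_rwlock_t *rw, long rel_ms)
    {
            struct timespec abstime;

            clock_gettime(CLOCK_REALTIME, &abstime);
            abstime.tv_sec += rel_ms / 1000;
            abstime.tv_nsec += (rel_ms % 1000) * 1000000L;
            if (abstime.tv_nsec >= 1000000000L) {   /* normalize */
                    abstime.tv_sec++;
                    abstime.tv_nsec -= 1000000000L;
            }
            return (pthread_rwlock_timedwrlock(rw, &abstime));
    }
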