diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c index 82bde10a153e..f2a66c406e85 100644 --- a/lib/libthr/thread/thr_init.c +++ b/lib/libthr/thread/thr_init.c @@ -1,498 +1,501 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <sys/types.h>
#include <sys/signalvar.h>
#include <sys/ioctl.h>
#include <sys/link_elf.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/mman.h>
#include <sys/rtprio.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "un-namespace.h"

#include "libc_private.h"
#include "thr_private.h"

-char *_usrstack;
+char *_stacktop;
struct pthread *_thr_initial;
int _libthr_debug;
int _thread_event_mask;
struct pthread *_thread_last_event;
pthreadlist _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);
pthreadlist _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
int _thread_active_threads = 1;
atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list);
struct urwlock _thr_atfork_lock = DEFAULT_URWLOCK;

struct pthread_prio _thr_priorities[3] = {
	{RTP_PRIO_MIN, RTP_PRIO_MAX, 0},	/* FIFO */
	{0, 0, 63},				/* OTHER */
	{RTP_PRIO_MIN, RTP_PRIO_MAX, 0}		/* RR */
};

struct pthread_attr _pthread_attr_default = {
	.sched_policy = SCHED_OTHER,
	.sched_inherit = PTHREAD_INHERIT_SCHED,
	.prio = 0,
	.suspend = THR_CREATE_RUNNING,
	.flags = PTHREAD_SCOPE_SYSTEM,
	.stackaddr_attr = NULL,
	.stacksize_attr = THR_STACK_DEFAULT,
	.guardsize_attr = 0,
	.cpusetsize = 0,
	.cpuset = NULL
};

struct pthread_mutex_attr _pthread_mutexattr_default = {
	.m_type = PTHREAD_MUTEX_DEFAULT,
	.m_protocol = PTHREAD_PRIO_NONE,
	.m_ceiling = 0,
	.m_pshared = PTHREAD_PROCESS_PRIVATE,
	.m_robust = PTHREAD_MUTEX_STALLED,
};

struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
	.m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
	.m_protocol = PTHREAD_PRIO_NONE,
	.m_ceiling = 0,
	.m_pshared = PTHREAD_PROCESS_PRIVATE,
	.m_robust = PTHREAD_MUTEX_STALLED,
};

/* Default condition variable attributes: */
struct pthread_cond_attr _pthread_condattr_default = {
	.c_pshared = PTHREAD_PROCESS_PRIVATE,
	.c_clockid = CLOCK_REALTIME
};

int _thr_is_smp = 0;
size_t _thr_guard_default;
size_t _thr_stack_default = THR_STACK_DEFAULT;
size_t _thr_stack_initial = THR_STACK_INITIAL;
int _thr_page_size;
int _thr_spinloops;
int _thr_yieldloops;
int _thr_queuefifo = 4;
int _gc_count;
struct umutex _mutex_static_lock = DEFAULT_UMUTEX;
struct umutex _cond_static_lock = DEFAULT_UMUTEX;
struct umutex _rwlock_static_lock = DEFAULT_UMUTEX;
struct umutex _keytable_lock = DEFAULT_UMUTEX;
struct urwlock _thr_list_lock = DEFAULT_URWLOCK;
struct umutex _thr_event_lock = DEFAULT_UMUTEX;
struct umutex _suspend_all_lock = DEFAULT_UMUTEX;
struct pthread *_single_thread;
int _suspend_all_cycle;
int _suspend_all_waiters;

int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int __pthread_mutex_lock(pthread_mutex_t *);
int __pthread_mutex_trylock(pthread_mutex_t *);
void _thread_init_hack(void) __attribute__ ((constructor));

static void init_private(void);
static void init_main_thread(struct pthread *thread);

/*
 * All weak references used within libc should be in this table.
 * This is so that static libraries will work.
 */
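The STATIC_LIB_REQUIRE list that follows depends on a linker property worth spelling out: a bare .globl directive leaves an undefined global symbol in the object file, which forces the static linker to extract the archive member that defines it. A minimal standalone sketch of the mechanism (illustrative only; foo_init and libfoo.a are hypothetical names, not part of this patch):

/*
 * sketch.c -- illustrative only; foo_init and libfoo.a are hypothetical.
 * The bare .globl directive records an undefined global reference in
 * this object, so linking "cc sketch.c libfoo.a" extracts the libfoo.a
 * member that defines foo_init even though no C code here calls it.
 */
__asm(".globl foo_init");

int
main(void)
{
	return (0);
}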
STATIC_LIB_REQUIRE(_fork);
STATIC_LIB_REQUIRE(_pthread_getspecific);
STATIC_LIB_REQUIRE(_pthread_key_create);
STATIC_LIB_REQUIRE(_pthread_key_delete);
STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
STATIC_LIB_REQUIRE(_pthread_mutex_init);
STATIC_LIB_REQUIRE(_pthread_mutex_lock);
STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
STATIC_LIB_REQUIRE(_pthread_mutexattr_init);
STATIC_LIB_REQUIRE(_pthread_mutexattr_destroy);
STATIC_LIB_REQUIRE(_pthread_mutexattr_settype);
STATIC_LIB_REQUIRE(_pthread_once);
STATIC_LIB_REQUIRE(_pthread_setspecific);
STATIC_LIB_REQUIRE(_raise);
STATIC_LIB_REQUIRE(_sem_destroy);
STATIC_LIB_REQUIRE(_sem_getvalue);
STATIC_LIB_REQUIRE(_sem_init);
STATIC_LIB_REQUIRE(_sem_post);
STATIC_LIB_REQUIRE(_sem_timedwait);
STATIC_LIB_REQUIRE(_sem_trywait);
STATIC_LIB_REQUIRE(_sem_wait);
STATIC_LIB_REQUIRE(_sigaction);
STATIC_LIB_REQUIRE(_sigprocmask);
STATIC_LIB_REQUIRE(_sigsuspend);
STATIC_LIB_REQUIRE(_sigtimedwait);
STATIC_LIB_REQUIRE(_sigwait);
STATIC_LIB_REQUIRE(_sigwaitinfo);
STATIC_LIB_REQUIRE(_spinlock);
STATIC_LIB_REQUIRE(_spinunlock);
STATIC_LIB_REQUIRE(_thread_init_hack);

/*
 * These are needed when linking statically.  All references within
 * libgcc (and in the future libc) to these routines are weak, but
 * if they are not (strongly) referenced by the application or other
 * libraries, then the actual functions will not be loaded.
 */
STATIC_LIB_REQUIRE(_pthread_once);
STATIC_LIB_REQUIRE(_pthread_key_create);
STATIC_LIB_REQUIRE(_pthread_key_delete);
STATIC_LIB_REQUIRE(_pthread_getspecific);
STATIC_LIB_REQUIRE(_pthread_setspecific);
STATIC_LIB_REQUIRE(_pthread_mutex_init);
STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
STATIC_LIB_REQUIRE(_pthread_mutex_lock);
STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
STATIC_LIB_REQUIRE(_pthread_create);

/* Pull in all symbols required by libthread_db */
STATIC_LIB_REQUIRE(_thread_state_running);

#define	DUAL_ENTRY(entry)	\
	(pthread_func_t)entry, (pthread_func_t)entry

static pthread_func_t jmp_table[][2] = {
	[PJT_ATFORK] = {DUAL_ENTRY(_thr_atfork)},
	[PJT_ATTR_DESTROY] = {DUAL_ENTRY(_thr_attr_destroy)},
	[PJT_ATTR_GETDETACHSTATE] = {DUAL_ENTRY(_thr_attr_getdetachstate)},
	[PJT_ATTR_GETGUARDSIZE] = {DUAL_ENTRY(_thr_attr_getguardsize)},
	[PJT_ATTR_GETINHERITSCHED] = {DUAL_ENTRY(_thr_attr_getinheritsched)},
	[PJT_ATTR_GETSCHEDPARAM] = {DUAL_ENTRY(_thr_attr_getschedparam)},
	[PJT_ATTR_GETSCHEDPOLICY] = {DUAL_ENTRY(_thr_attr_getschedpolicy)},
	[PJT_ATTR_GETSCOPE] = {DUAL_ENTRY(_thr_attr_getscope)},
	[PJT_ATTR_GETSTACKADDR] = {DUAL_ENTRY(_thr_attr_getstackaddr)},
	[PJT_ATTR_GETSTACKSIZE] = {DUAL_ENTRY(_thr_attr_getstacksize)},
	[PJT_ATTR_INIT] = {DUAL_ENTRY(_thr_attr_init)},
	[PJT_ATTR_SETDETACHSTATE] = {DUAL_ENTRY(_thr_attr_setdetachstate)},
	[PJT_ATTR_SETGUARDSIZE] = {DUAL_ENTRY(_thr_attr_setguardsize)},
	[PJT_ATTR_SETINHERITSCHED] = {DUAL_ENTRY(_thr_attr_setinheritsched)},
	[PJT_ATTR_SETSCHEDPARAM] = {DUAL_ENTRY(_thr_attr_setschedparam)},
	[PJT_ATTR_SETSCHEDPOLICY] = {DUAL_ENTRY(_thr_attr_setschedpolicy)},
	[PJT_ATTR_SETSCOPE] = {DUAL_ENTRY(_thr_attr_setscope)},
	[PJT_ATTR_SETSTACKADDR] = {DUAL_ENTRY(_thr_attr_setstackaddr)},
	[PJT_ATTR_SETSTACKSIZE] = {DUAL_ENTRY(_thr_attr_setstacksize)},
	[PJT_CANCEL] = {DUAL_ENTRY(_thr_cancel)},
	[PJT_CLEANUP_POP] = {DUAL_ENTRY(_thr_cleanup_pop)},
	[PJT_CLEANUP_PUSH] = {DUAL_ENTRY(_thr_cleanup_push)},
	[PJT_COND_BROADCAST] = {DUAL_ENTRY(_thr_cond_broadcast)},
	[PJT_COND_DESTROY] = {DUAL_ENTRY(_thr_cond_destroy)},
	[PJT_COND_INIT] =
{DUAL_ENTRY(_thr_cond_init)}, [PJT_COND_SIGNAL] = {DUAL_ENTRY(_thr_cond_signal)}, [PJT_COND_TIMEDWAIT] = {DUAL_ENTRY(_thr_cond_timedwait)}, [PJT_COND_WAIT] = {(pthread_func_t)__thr_cond_wait, (pthread_func_t)_thr_cond_wait}, [PJT_DETACH] = {DUAL_ENTRY(_thr_detach)}, [PJT_EQUAL] = {DUAL_ENTRY(_thr_equal)}, [PJT_EXIT] = {DUAL_ENTRY(_Tthr_exit)}, [PJT_GETSPECIFIC] = {DUAL_ENTRY(_thr_getspecific)}, [PJT_JOIN] = {DUAL_ENTRY(_thr_join)}, [PJT_KEY_CREATE] = {DUAL_ENTRY(_thr_key_create)}, [PJT_KEY_DELETE] = {DUAL_ENTRY(_thr_key_delete)}, [PJT_KILL] = {DUAL_ENTRY(_Tthr_kill)}, [PJT_MAIN_NP] = {DUAL_ENTRY(_thr_main_np)}, [PJT_MUTEXATTR_DESTROY] = {DUAL_ENTRY(_thr_mutexattr_destroy)}, [PJT_MUTEXATTR_INIT] = {DUAL_ENTRY(_thr_mutexattr_init)}, [PJT_MUTEXATTR_SETTYPE] = {DUAL_ENTRY(_thr_mutexattr_settype)}, [PJT_MUTEX_DESTROY] = {DUAL_ENTRY(_thr_mutex_destroy)}, [PJT_MUTEX_INIT] = {DUAL_ENTRY(__Tthr_mutex_init)}, [PJT_MUTEX_LOCK] = {DUAL_ENTRY(__Tthr_mutex_lock)}, [PJT_MUTEX_TRYLOCK] = {DUAL_ENTRY(__Tthr_mutex_trylock)}, [PJT_MUTEX_UNLOCK] = {DUAL_ENTRY(_thr_mutex_unlock)}, [PJT_ONCE] = {DUAL_ENTRY(_thr_once)}, [PJT_RWLOCK_DESTROY] = {DUAL_ENTRY(_thr_rwlock_destroy)}, [PJT_RWLOCK_INIT] = {DUAL_ENTRY(_thr_rwlock_init)}, [PJT_RWLOCK_RDLOCK] = {DUAL_ENTRY(_Tthr_rwlock_rdlock)}, [PJT_RWLOCK_TRYRDLOCK] = {DUAL_ENTRY(_Tthr_rwlock_tryrdlock)}, [PJT_RWLOCK_TRYWRLOCK] = {DUAL_ENTRY(_Tthr_rwlock_trywrlock)}, [PJT_RWLOCK_UNLOCK] = {DUAL_ENTRY(_Tthr_rwlock_unlock)}, [PJT_RWLOCK_WRLOCK] = {DUAL_ENTRY(_Tthr_rwlock_wrlock)}, [PJT_SELF] = {DUAL_ENTRY(_Tthr_self)}, [PJT_SETCANCELSTATE] = {DUAL_ENTRY(_thr_setcancelstate)}, [PJT_SETCANCELTYPE] = {DUAL_ENTRY(_thr_setcanceltype)}, [PJT_SETSPECIFIC] = {DUAL_ENTRY(_thr_setspecific)}, [PJT_SIGMASK] = {DUAL_ENTRY(_thr_sigmask)}, [PJT_TESTCANCEL] = {DUAL_ENTRY(_Tthr_testcancel)}, [PJT_CLEANUP_POP_IMP] = {DUAL_ENTRY(__thr_cleanup_pop_imp)}, [PJT_CLEANUP_PUSH_IMP] = {DUAL_ENTRY(__thr_cleanup_push_imp)}, [PJT_CANCEL_ENTER] = {DUAL_ENTRY(_Tthr_cancel_enter)}, [PJT_CANCEL_LEAVE] = {DUAL_ENTRY(_Tthr_cancel_leave)}, [PJT_MUTEX_CONSISTENT] = {DUAL_ENTRY(_Tthr_mutex_consistent)}, [PJT_MUTEXATTR_GETROBUST] = {DUAL_ENTRY(_thr_mutexattr_getrobust)}, [PJT_MUTEXATTR_SETROBUST] = {DUAL_ENTRY(_thr_mutexattr_setrobust)}, [PJT_GETTHREADID_NP] = {DUAL_ENTRY(_thr_getthreadid_np)}, [PJT_ATTR_GET_NP] = {DUAL_ENTRY(_thr_attr_get_np)}, }; static int init_once = 0; /* * For the shared version of the threads library, the above is sufficient. * But for the archive version of the library, we need a little bit more. * Namely, we must arrange for this particular module to be pulled in from * the archive library at link time. To accomplish that, we define and * initialize a variable, "_thread_autoinit_dummy_decl". This variable is * referenced (as an extern) from libc/stdlib/exit.c. This will always * create a need for this module, ensuring that it is present in the * executable. */ extern int _thread_autoinit_dummy_decl; int _thread_autoinit_dummy_decl = 0; void _thread_init_hack(void) { _libpthread_init(NULL); } /* * Threaded process initialization. 
 *
 * 1) Some thread routines have detected that the library hasn't yet
 *    been initialized (_thr_initial == NULL && curthread == NULL), or
 *
 * 2) An explicit call to reinitialize after a fork (indicated
 *    by curthread != NULL)
 */
void
_libpthread_init(struct pthread *curthread)
{
	int first, dlopened;

	/* Check if this function has already been called: */
	if (_thr_initial != NULL && curthread == NULL)
		/* Only initialize the threaded application once. */
		return;

	/*
	 * Check the size of the jump table to make sure it is preset
	 * with the correct number of entries.
	 */
	if (sizeof(jmp_table) != sizeof(pthread_func_t) * PJT_MAX * 2)
		PANIC("Thread jump table not properly initialized");
	memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
	__thr_interpose_libc();

	/* Initialize pthread private data. */
	init_private();

	/* Set the initial thread. */
	if (curthread == NULL) {
		first = 1;
		/* Create and initialize the initial thread. */
		curthread = _thr_alloc(NULL);
		if (curthread == NULL)
			PANIC("Can't allocate initial thread");
		init_main_thread(curthread);
	} else {
		first = 0;
	}

	/*
	 * Add the thread to the thread list queue.
	 */
	THR_LIST_ADD(curthread);
	_thread_active_threads = 1;

	/* Setup the thread specific data */
	_tcb_set(curthread->tcb);

	if (first) {
		_thr_initial = curthread;
		dlopened = _rtld_is_dlopened(&_thread_autoinit_dummy_decl) != 0;
		_thr_signal_init(dlopened);
		if (_thread_event_mask & TD_CREATE)
			_thr_report_creation(curthread, curthread);
		/*
		 * Always use our rtld lock implementation.
		 * It is faster because it postpones signal handlers
		 * instead of calling sigprocmask(2).
		 */
		_thr_rtld_init();
	}
}
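As an aside, the sizeof check on jmp_table performed above at runtime could be expressed at compile time; a minimal sketch, assuming C11 _Static_assert is acceptable to the build (the patch itself does not make this change):

/* Compile-time variant of the jump-table size check (sketch only). */
_Static_assert(sizeof(jmp_table) == sizeof(pthread_func_t) * PJT_MAX * 2,
    "Thread jump table not properly initialized");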
/*
 * This function and pthread_create() do a lot of the same things.
 * It'd be nice to consolidate the common stuff in one place.
 */
static void
init_main_thread(struct pthread *thread)
{
	struct sched_param sched_param;
	int i;

	/* Setup the thread attributes. */
	thr_self(&thread->tid);
	thread->attr = _pthread_attr_default;
	/*
	 * Set up the thread stack.
	 *
	 * Create a red zone below the main stack.  All other stacks
	 * are constrained to a maximum size by the parameters
	 * passed to mmap(), but this stack is only limited by
	 * resource limits, so this stack needs an explicitly mapped
	 * red zone to protect the thread stack that is just beyond.
	 */
-	if (mmap(_usrstack - _thr_stack_initial -
+	if (mmap(_stacktop - _thr_stack_initial -
	    _thr_guard_default, _thr_guard_default, 0, MAP_ANON,
	    -1, 0) == MAP_FAILED)
		PANIC("Cannot allocate red zone for initial thread");

	/*
	 * Mark the stack as an application supplied stack so that it
	 * isn't deallocated.
	 *
	 * XXX - I'm not sure it would hurt anything to deallocate
	 *       the main thread stack because deallocation doesn't
	 *       actually free() it; it just puts it in the free
	 *       stack queue for later reuse.
	 */
-	thread->attr.stackaddr_attr = _usrstack - _thr_stack_initial;
+	thread->attr.stackaddr_attr = _stacktop - _thr_stack_initial;
	thread->attr.stacksize_attr = _thr_stack_initial;
	thread->attr.guardsize_attr = _thr_guard_default;
	thread->attr.flags |= THR_STACK_USER;

	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	thread->magic = THR_MAGIC;

	thread->cancel_enable = 1;
	thread->cancel_async = 0;

	/* Initialize the mutex queues */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&thread->mq[i]);

	thread->state = PS_RUNNING;

	_thr_getscheduler(thread->tid, &thread->attr.sched_policy,
	    &sched_param);
	thread->attr.prio = sched_param.sched_priority;

#ifdef _PTHREAD_FORCED_UNWIND
-	thread->unwind_stackend = _usrstack;
+	thread->unwind_stackend = _stacktop;
#endif

	/* Others cleared to zero by thr_alloc() */
}

static void
init_private(void)
{
	struct rlimit rlim;
	size_t len;
	int mib[2];
	char *env, *env_bigstack, *env_splitstack;

	_thr_umutex_init(&_mutex_static_lock);
	_thr_umutex_init(&_cond_static_lock);
	_thr_umutex_init(&_rwlock_static_lock);
	_thr_umutex_init(&_keytable_lock);
	_thr_urwlock_init(&_thr_atfork_lock);
	_thr_umutex_init(&_thr_event_lock);
	_thr_umutex_init(&_suspend_all_lock);
	_thr_spinlock_init();
	_thr_list_init();
	_thr_wake_addr_init();
	_sleepq_init();
	_single_thread = NULL;
	_suspend_all_waiters = 0;

	/*
	 * Avoid reinitializing some things if they don't need to be,
	 * e.g. after a fork().
	 */
	if (init_once == 0) {
		__thr_pshared_init();
		__thr_malloc_init();
		/* Find the stack top */
		mib[0] = CTL_KERN;
-		mib[1] = KERN_USRSTACK;
-		len = sizeof (_usrstack);
-		if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
-			PANIC("Cannot get kern.usrstack from sysctl");
+		mib[1] = KERN_STACKTOP;
+		len = sizeof (_stacktop);
+		if (sysctl(mib, 2, &_stacktop, &len, NULL, 0) == -1) {
+			mib[1] = KERN_USRSTACK;
+			if (sysctl(mib, 2, &_stacktop, &len, NULL, 0) == -1)
+				PANIC("Cannot get kern.usrstack from sysctl");
+		}
		env_bigstack = getenv("LIBPTHREAD_BIGSTACK_MAIN");
		env_splitstack = getenv("LIBPTHREAD_SPLITSTACK_MAIN");
		if (env_bigstack != NULL || env_splitstack == NULL) {
			if (getrlimit(RLIMIT_STACK, &rlim) == -1)
				PANIC("Cannot get stack rlimit");
			_thr_stack_initial = rlim.rlim_cur;
		}
		_thr_is_smp = sysconf(_SC_NPROCESSORS_CONF);
		if (_thr_is_smp == -1)
			PANIC("Cannot get _SC_NPROCESSORS_CONF");
		_thr_is_smp = (_thr_is_smp > 1);
		_thr_page_size = getpagesize();
		_thr_guard_default = _thr_page_size;
		_pthread_attr_default.guardsize_attr = _thr_guard_default;
		_pthread_attr_default.stacksize_attr = _thr_stack_default;
		env = getenv("LIBPTHREAD_SPINLOOPS");
		if (env)
			_thr_spinloops = atoi(env);
		env = getenv("LIBPTHREAD_YIELDLOOPS");
		if (env)
			_thr_yieldloops = atoi(env);
		env = getenv("LIBPTHREAD_QUEUE_FIFO");
		if (env)
			_thr_queuefifo = atoi(env);
		TAILQ_INIT(&_thr_atfork_list);
	}
	init_once = 1;
}
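The fallback pattern init_private() now uses can be exercised on its own. A minimal standalone sketch (illustrative only; it assumes KERN_STACKTOP is declared in <sys/sysctl.h>, as this patch series requires, and falls back to KERN_USRSTACK on kernels that predate it):

/* stacktop.c -- standalone sketch of the lookup-with-fallback pattern. */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static char *
get_stacktop(void)
{
	char *top;
	size_t len;
	int mib[2];

	mib[0] = CTL_KERN;
	mib[1] = KERN_STACKTOP;		/* new name; may be absent */
	len = sizeof(top);
	if (sysctl(mib, nitems(mib), &top, &len, NULL, 0) == -1) {
		/* Older kernel: fall back to the traditional name. */
		mib[1] = KERN_USRSTACK;
		if (sysctl(mib, nitems(mib), &top, &len, NULL, 0) == -1)
			return (NULL);
	}
	return (top);
}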
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index a5bbc5997d30..d6fb74bb4372 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -1,1109 +1,1109 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2005 Daniel M. Eischen
 * Copyright (c) 2005 David Xu
 * Copyright (c) 1995-1998 John Birrell .
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/param.h>
#include <sys/cpuset.h>
#include <machine/atomic.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <ucontext.h>
#include <sys/thr.h>
#include <pthread.h>

__NULLABILITY_PRAGMA_PUSH

#define	SYM_FB10(sym)			__CONCAT(sym, _fb10)
#define	SYM_FBP10(sym)			__CONCAT(sym, _fbp10)
#define	WEAK_REF(sym, alias)		__weak_reference(sym, alias)
#define	SYM_COMPAT(sym, impl, ver)	__sym_compat(sym, impl, ver)
#define	SYM_DEFAULT(sym, impl, ver)	__sym_default(sym, impl, ver)

#define	FB10_COMPAT(func, sym)				\
	WEAK_REF(func, SYM_FB10(sym));			\
	SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0)
#define	FB10_COMPAT_PRIVATE(func, sym)			\
	WEAK_REF(func, SYM_FBP10(sym));			\
	SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0)

struct pthread;
extern struct pthread	*_thr_initial __hidden;

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"

#ifdef _PTHREAD_FORCED_UNWIND
#define _BSD_SOURCE
#include <unwind.h>
#endif

typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
TAILQ_HEAD(mutex_queue, pthread_mutex);

/* Signal to do cancellation */
#define	SIGCANCEL		SIGTHR

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)
_thread_printf(STDERR_FILENO, ##args) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (__predict_false(!(cond))) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif #ifdef PIC # define STATIC_LIB_REQUIRE(name) #else # define STATIC_LIB_REQUIRE(name) __asm (".globl " #name) #endif #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* Magic cookie set for shared pthread locks and cv's pointers */ #define THR_PSHARED_PTR \ ((void *)(uintptr_t)((1ULL << (NBBY * sizeof(long) - 1)) | 1)) /* XXX These values should be same as those defined in pthread.h */ #define THR_MUTEX_INITIALIZER ((struct pthread_mutex *)NULL) #define THR_ADAPTIVE_MUTEX_INITIALIZER ((struct pthread_mutex *)1) #define THR_MUTEX_DESTROYED ((struct pthread_mutex *)2) #define THR_COND_INITIALIZER ((struct pthread_cond *)NULL) #define THR_COND_DESTROYED ((struct pthread_cond *)1) #define THR_RWLOCK_INITIALIZER ((struct pthread_rwlock *)NULL) #define THR_RWLOCK_DESTROYED ((struct pthread_rwlock *)1) #define PMUTEX_FLAG_TYPE_MASK 0x0ff #define PMUTEX_FLAG_PRIVATE 0x100 #define PMUTEX_FLAG_DEFERRED 0x200 #define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK) #define PMUTEX_OWNER_ID(m) ((m)->m_lock.m_owner & ~UMUTEX_CONTESTED) #define MAX_DEFER_WAITERS 50 /* * Values for pthread_mutex m_ps indicator. */ #define PMUTEX_INITSTAGE_ALLOC 0 #define PMUTEX_INITSTAGE_BUSY 1 #define PMUTEX_INITSTAGE_DONE 2 struct pthread_mutex { /* * Lock for accesses to this structure. */ struct umutex m_lock; int m_flags; int m_count; int m_spinloops; int m_yieldloops; int m_ps; /* pshared init stage */ /* * Link for all mutexes a thread currently owns, of the same * prio type. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* Link for all private mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_pqe; struct pthread_mutex *m_rb_prev; }; struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; int m_pshared; int m_robust; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE, \ PTHREAD_MUTEX_STALLED } struct pthread_cond { __uint32_t __has_user_waiters; struct ucond kcond; }; struct pthread_cond_attr { int c_pshared; int c_clockid; }; struct pthread_barrier { struct umutex b_lock; struct ucond b_cv; int64_t b_cycle; int b_count; int b_waiters; int b_refcount; int b_destroying; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { struct umutex s_lock; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Cleanup definitions. 
 */
struct pthread_cleanup {
	struct pthread_cleanup	*prev;
	void			(*routine)(void *);
	void			*routine_arg;
	int			onheap;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onheap = 0;				\
	__cup.prev = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.prev;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
#define	pthread_attr_start_copy	sched_policy
	int	sched_policy;
	int	sched_inherit;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
	int	flags;
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
#define	pthread_attr_end_copy	cpuset
	cpuset_t	*cpuset;
	size_t	cpusetsize;
};

struct wake_addr {
	struct wake_addr *link;
	unsigned int	value;
	char		pad[12];
};

struct sleepqueue {
	TAILQ_HEAD(, pthread)	sq_blocked;
	SLIST_HEAD(, sleepqueue) sq_freeq;
	LIST_ENTRY(sleepqueue)	sq_hash;
	SLIST_ENTRY(sleepqueue) sq_flink;
	void			*sq_wchan;
	int			sq_type;
};

/*
 * Thread creation state attributes.
 */
#define	THR_CREATE_RUNNING		0
#define	THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define	THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define	THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)

/*
 * Define priorities returned by kernel.
 */
#define	THR_MIN_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_min)
#define	THR_MAX_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_max)
#define	THR_DEF_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_default)
#define	THR_MIN_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_min)
#define	THR_MAX_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_max)
#define	THR_DEF_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_default)
/* XXX The SCHED_FIFO should have same priority range as SCHED_RR */
#define	THR_MIN_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_min)
#define	THR_MAX_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_max)
#define	THR_DEF_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_default)

struct pthread_prio {
	int	pri_min;
	int	pri_max;
	int	pri_default;
};

struct pthread_rwlockattr {
	int	pshared;
};

struct pthread_rwlock {
	struct urwlock	lock;
	uint32_t	owner;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	int		seqno;
	void		(*destructor)(void *);
};

/*
 * lwpid_t is 32bit but kernel thr API exports tid as long type
 * to preserve the ABI for M:N model in very early date (r131431).
 */
#define	TID(thread)	((uint32_t) ((thread)->tid))

/*
 * Thread structure.
 */
struct pthread {
#define	_pthread_startzero	tid
	/* Kernel thread id. */
	long			tid;
#define	TID_TERMINATED		1

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct umutex		lock;
	/* Internal condition variable cycle number. */
	uint32_t		cycle;
	/* How many low level locks the thread held. */
	int			locklevel;
	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;
	/* Signal blocked counter. */
	int			sigblock;
	/* Fast sigblock var. */
	uint32_t		fsigblock;
	/* Queue entry for list of all threads.
*/ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ /* Queue entry for GC lists. */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry. */ LIST_ENTRY(pthread) hle; /* Sleep queue entry */ TAILQ_ENTRY(pthread) wle; /* Threads reference count. */ int refcount; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; #define SHOULD_CANCEL(thr) \ ((thr)->cancel_pending && (thr)->cancel_enable && \ (thr)->no_cancel == 0) /* Cancellation is enabled */ int cancel_enable; /* Cancellation request is pending */ int cancel_pending; /* Thread is at cancellation point */ int cancel_point; /* Cancellation is temporarily disabled */ int no_cancel; /* Asynchronouse cancellation is enabled */ int cancel_async; /* Cancellation is in progress */ int cancelling; /* Thread temporary signal mask. */ sigset_t sigmask; /* Thread should unblock SIGCANCEL. */ int unblock_sigcancel; /* In sigsuspend state */ int in_sigsuspend; /* deferred signal info */ siginfo_t deferred_siginfo; /* signal mask to restore. */ sigset_t deferred_sigmask; /* the sigaction should be used for deferred signal. */ struct sigaction deferred_sigact; /* deferred signal delivery is performed, do not reenter. */ int deferred_run; /* Force new thread to exit. */ int force_exit; /* Thread state: */ enum pthread_state state; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */ #define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */ #define THR_FLAGS_DETACHED 0x0008 /* thread is detached */ /* Thread list flags; only set with thread list lock held. */ int tlflags; #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ /* * Queues of the owned mutexes. Private queue must have index * + 1 of the corresponding full queue. */ #define TMQ_NORM 0 /* NORMAL or PRIO_INHERIT normal */ #define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */ #define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */ #define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */ #define TMQ_ROBUST_PP 4 /* PRIO_PROTECT robust mutexes */ #define TMQ_ROBUST_PP_PRIV 5 /* PRIO_PROTECT robust priv */ #define TMQ_NITEMS 6 struct mutex_queue mq[TMQ_NITEMS]; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Number rwlocks rdlocks held. */ int rdlock_count; /* * Current locks bitmap for rtld. */ int rtld_bits; /* Thread control block */ struct tcb *tcb; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; #ifdef _PTHREAD_FORCED_UNWIND struct _Unwind_Exception ex; void *unwind_stackend; int unwind_disabled; #endif /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; /* Enable event reporting */ int report_events; /* Event mask */ int event_mask; /* Event */ td_event_msg_t event_buf; /* Wait channel */ void *wchan; /* Referenced mutex. */ struct pthread_mutex *mutex_obj; /* Thread will sleep. 
*/ int will_sleep; /* Number of threads deferred. */ int nwaiter_defer; int robust_inited; uintptr_t robust_list; uintptr_t priv_robust_list; uintptr_t inact_mtx; /* Deferred threads from pthread_cond_signal. */ unsigned int *defer_waiters[MAX_DEFER_WAITERS]; #define _pthread_endzero wake_addr struct wake_addr *wake_addr; #define WAKE_ADDR(td) ((td)->wake_addr) /* Sleep queue */ struct sleepqueue *sleepqueue; /* pthread_set/get_name_np */ char *name; /* rtld thread-local dlerror message and seen control */ char dlerror_msg[512]; int dlerror_seen; }; #define THR_SHOULD_GC(thrd) \ ((thrd)->refcount == 0 && (thrd)->state == PS_DEAD && \ ((thrd)->flags & THR_FLAGS_DETACHED) != 0) #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_CRITICAL_ENTER(thrd) \ (thrd)->critical_count++ #define THR_CRITICAL_LEAVE(thrd) \ do { \ (thrd)->critical_count--; \ _thr_ast(thrd); \ } while (0) #define THR_UMUTEX_TRYLOCK(thrd, lck) \ _thr_umutex_trylock((lck), TID(thrd)) #define THR_UMUTEX_LOCK(thrd, lck) \ _thr_umutex_lock((lck), TID(thrd)) #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) #define THR_UMUTEX_UNLOCK(thrd, lck) \ _thr_umutex_unlock((lck), TID(thrd)) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock(lck, TID(thrd)); \ } while (0) #define THR_LOCK_ACQUIRE_SPIN(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock_spin(lck, TID(thrd)); \ } while (0) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT_LOCKLEVEL(thrd) \ do { \ if (__predict_false((thrd)->locklevel <= 0)) \ _thr_assert_lock_level(); \ } while (0) #else #define THR_ASSERT_LOCKLEVEL(thrd) #endif #define THR_LOCK_RELEASE(thrd, lck) \ do { \ THR_ASSERT_LOCKLEVEL(thrd); \ _thr_umutex_unlock((lck), TID(thrd)); \ (thrd)->locklevel--; \ _thr_ast(thrd); \ } while (0) #define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock) #define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) #define THREAD_LIST_RDLOCK(curthrd) \ do { \ (curthrd)->locklevel++; \ _thr_rwl_rdlock(&_thr_list_lock); \ } while (0) #define THREAD_LIST_WRLOCK(curthrd) \ do { \ (curthrd)->locklevel++; \ _thr_rwl_wrlock(&_thr_list_lock); \ } while (0) #define THREAD_LIST_UNLOCK(curthrd) \ do { \ _thr_rwl_unlock(&_thr_list_lock); \ (curthrd)->locklevel--; \ _thr_ast(curthrd); \ } while (0) /* * Macros to insert/remove threads to the all thread list and * the gc list. 
*/ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define THR_REF_ADD(curthread, pthread) { \ THR_CRITICAL_ENTER(curthread); \ pthread->refcount++; \ } while (0) #define THR_REF_DEL(curthread, pthread) { \ pthread->refcount--; \ THR_CRITICAL_LEAVE(curthread); \ } while (0) #define GC_NEEDED() (_gc_count >= 5) #define SHOULD_REPORT_EVENT(curthr, e) \ (curthr->report_events && \ (((curthr)->event_mask | _thread_event_mask ) & e) != 0) #ifndef __LIBC_ISTHREADED_DECLARED #define __LIBC_ISTHREADED_DECLARED extern int __isthreaded; #endif /* * Global variables for the pthread kernel. */ -extern char *_usrstack __hidden; +extern char *_stacktop __hidden; /* For debugger */ extern int _libthr_debug; extern int _thread_event_mask; extern struct pthread *_thread_last_event; /* Used in symbol lookup of libthread_db */ extern struct pthread_key _thread_keytable[]; /* List of all threads: */ extern pthreadlist _thread_list; /* List of threads needing GC: */ extern pthreadlist _thread_gc_list __hidden; extern int _thread_active_threads; extern atfork_head _thr_atfork_list __hidden; extern struct urwlock _thr_atfork_lock __hidden; /* Default thread attributes: */ extern struct pthread_attr _pthread_attr_default __hidden; /* Default mutex attributes: */ extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden; extern struct pthread_mutex_attr _pthread_mutexattr_adaptive_default __hidden; /* Default condition variable attributes: */ extern struct pthread_cond_attr _pthread_condattr_default __hidden; extern struct pthread_prio _thr_priorities[] __hidden; extern int _thr_is_smp __hidden; extern size_t _thr_guard_default __hidden; extern size_t _thr_stack_default __hidden; extern size_t _thr_stack_initial __hidden; extern int _thr_page_size __hidden; extern int _thr_spinloops __hidden; extern int _thr_yieldloops __hidden; extern int _thr_queuefifo __hidden; /* Garbage thread count. */ extern int _gc_count __hidden; extern struct umutex _mutex_static_lock __hidden; extern struct umutex _cond_static_lock __hidden; extern struct umutex _rwlock_static_lock __hidden; extern struct umutex _keytable_lock __hidden; extern struct urwlock _thr_list_lock __hidden; extern struct umutex _thr_event_lock __hidden; extern struct umutex _suspend_all_lock __hidden; extern int _suspend_all_waiters __hidden; extern int _suspend_all_cycle __hidden; extern struct pthread *_single_thread __hidden; /* * Function prototype definitions. 
*/ __BEGIN_DECLS void _thr_setthreaded(int) __hidden; int _mutex_cv_lock(struct pthread_mutex *, int, bool) __hidden; int _mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden; int _mutex_cv_attach(struct pthread_mutex *, int) __hidden; int _mutex_cv_detach(struct pthread_mutex *, int *) __hidden; int _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; int _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) __hidden; void _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m) __hidden; void _libpthread_init(struct pthread *) __hidden; struct pthread *_thr_alloc(struct pthread *) __hidden; void _thread_exit(const char *, int, const char *) __hidden __dead2; void _thread_exitf(const char *, int, const char *, ...) __hidden __dead2 __printflike(3, 4); int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden; void _thr_ref_delete(struct pthread *, struct pthread *) __hidden; void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden; int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden; void _thr_rtld_init(void) __hidden; void _thr_rtld_postfork_child(void) __hidden; int _thr_stack_alloc(struct pthread_attr *) __hidden; void _thr_stack_free(struct pthread_attr *) __hidden; void _thr_free(struct pthread *, struct pthread *) __hidden; void _thr_gc(struct pthread *) __hidden; void _thread_cleanupspecific(void) __hidden; void _thread_printf(int, const char *, ...) __hidden __printflike(2, 3); void _thread_vprintf(int, const char *, va_list) __hidden; void _thr_spinlock_init(void) __hidden; void _thr_cancel_enter(struct pthread *) __hidden; void _thr_cancel_enter2(struct pthread *, int) __hidden; void _thr_cancel_leave(struct pthread *, int) __hidden; void _thr_testcancel(struct pthread *) __hidden; void _thr_signal_block(struct pthread *) __hidden; void _thr_signal_unblock(struct pthread *) __hidden; void _thr_signal_block_check_fast(void) __hidden; void _thr_signal_block_setup(struct pthread *) __hidden; void _thr_signal_init(int) __hidden; void _thr_signal_deinit(void) __hidden; int _thr_send_sig(struct pthread *, int sig) __hidden; void _thr_list_init(void) __hidden; void _thr_hash_add(struct pthread *) __hidden; void _thr_hash_remove(struct pthread *) __hidden; struct pthread *_thr_hash_find(struct pthread *) __hidden; void _thr_link(struct pthread *, struct pthread *) __hidden; void _thr_unlink(struct pthread *, struct pthread *) __hidden; void _thr_assert_lock_level(void) __hidden __dead2; void _thr_ast(struct pthread *) __hidden; void _thr_report_creation(struct pthread *curthread, struct pthread *newthread) __hidden; void _thr_report_death(struct pthread *curthread) __hidden; int _thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden; int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden; void _thr_signal_prefork(void) __hidden; void _thr_signal_postfork(void) __hidden; void _thr_signal_postfork_child(void) __hidden; void _thr_suspend_all_lock(struct pthread *) __hidden; void _thr_suspend_all_unlock(struct pthread *) __hidden; void _thr_try_gc(struct pthread *, struct pthread *) __hidden; int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, struct rtprio *rtp) __hidden; void _thread_bp_create(void); void _thread_bp_death(void); int 
_sched_yield(void);

void	_pthread_cleanup_push(void (*)(void *), void *);
void	_pthread_cleanup_pop(int);
void	_pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden;
#ifndef _LIBC_PRIVATE_H_
void	_pthread_cancel_enter(int maycancel);
void	_pthread_cancel_leave(int maycancel);
#endif
int	_pthread_mutex_consistent(pthread_mutex_t * _Nonnull);
int	_pthread_mutexattr_getrobust(pthread_mutexattr_t * _Nonnull __restrict,
	    int * _Nonnull __restrict);
int	_pthread_mutexattr_setrobust(pthread_mutexattr_t * _Nonnull, int);

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
#ifndef _LIBC_PRIVATE_H_
int	__sys_fcntl(int, int, ...);
int	__sys_openat(int, const char *, int, ...);
#endif /* _LIBC_PRIVATE_H_ */
#endif /* _SYS_FCNTL_H_ */

/* #include <signal.h> */
#ifdef _SIGNAL_H_
#ifndef _LIBC_PRIVATE_H_
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigsuspend(const sigset_t *);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
	    const struct timespec *);
int	__sys_sigwait(const sigset_t *, int *);
int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif /* _LIBC_PRIVATE_H_ */
#endif /* _SYS_FCNTL_H_ */

/* #include <time.h> */
#ifdef _TIME_H_
#ifndef _LIBC_PRIVATE_H_
int	__sys_clock_nanosleep(clockid_t, int, const struct timespec *,
	    struct timespec *);
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif /* _LIBC_PRIVATE_H_ */
#endif /* _SYS_FCNTL_H_ */

/* #include <ucontext.h> */
#ifdef _SYS_UCONTEXT_H_
#ifndef _LIBC_PRIVATE_H_
int	__sys_setcontext(const ucontext_t *ucp);
int	__sys_swapcontext(ucontext_t *oucp, const ucontext_t *ucp);
#endif /* _LIBC_PRIVATE_H_ */
#endif /* _SYS_FCNTL_H_ */

/* #include <unistd.h> */
#ifdef _UNISTD_H_
#ifndef _LIBC_PRIVATE_H_
int	__sys_close(int);
int	__sys_fork(void);
ssize_t	__sys_read(int, void *, size_t);
#endif /* _LIBC_PRIVATE_H_ */
#endif /* _SYS_FCNTL_H_ */

static inline int
_thr_isthreaded(void)
{
	return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
	return (_thr_initial != NULL);
}

static inline void
_thr_check_init(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);
}

struct wake_addr *_thr_alloc_wake_addr(void);
void	_thr_release_wake_addr(struct wake_addr *);
int	_thr_sleep(struct pthread *, int, const struct timespec *);
void	_thr_wake_addr_init(void) __hidden;

static inline void
_thr_clear_wake(struct pthread *td)
{
	td->wake_addr->value = 0;
}

static inline int
_thr_is_woken(struct pthread *td)
{
	return td->wake_addr->value != 0;
}

static inline void
_thr_set_wake(unsigned int *waddr)
{
	*waddr = 1;
	_thr_umtx_wake(waddr, INT_MAX, 0);
}

void	_thr_wake_all(unsigned int *waddrs[], int) __hidden;

static inline struct pthread *
_sleepq_first(struct sleepqueue *sq)
{
	return TAILQ_FIRST(&sq->sq_blocked);
}

void	_sleepq_init(void) __hidden;
struct sleepqueue *_sleepq_alloc(void) __hidden;
void	_sleepq_free(struct sleepqueue *) __hidden;
void	_sleepq_lock(void *) __hidden;
void	_sleepq_unlock(void *) __hidden;
struct sleepqueue *_sleepq_lookup(void *) __hidden;
void	_sleepq_add(void *, struct pthread *) __hidden;
int	_sleepq_remove(struct sleepqueue *, struct pthread *) __hidden;
void	_sleepq_drop(struct sleepqueue *,
	    void (*cb)(struct pthread *, void *arg), void *) __hidden;

int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));

struct dl_phdr_info;
void	__pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
void	_thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden;
void	_thr_sigact_unload(struct dl_phdr_info *phdr_info)
__hidden; void _thr_stack_fix_protection(struct pthread *thrd); void __pthread_distribute_static_tls(size_t offset, void *src, size_t len, size_t total_len); int *__error_threaded(void) __hidden; void __thr_interpose_libc(void) __hidden; pid_t __thr_fork(void); pid_t __thr_pdfork(int *, int); int __thr_setcontext(const ucontext_t *ucp); int __thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact) __hidden; int __thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset); int __thr_sigsuspend(const sigset_t * set); int __thr_sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout); int __thr_sigwait(const sigset_t *set, int *sig); int __thr_sigwaitinfo(const sigset_t *set, siginfo_t *info); int __thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp); void __thr_map_stacks_exec(void); struct _spinlock; void __thr_spinunlock(struct _spinlock *lck); void __thr_spinlock(struct _spinlock *lck); struct tcb *_tcb_ctor(struct pthread *, int); void _tcb_dtor(struct tcb *); void __thr_pshared_init(void) __hidden; void *__thr_pshared_offpage(void *key, int doalloc) __hidden; void __thr_pshared_destroy(void *key) __hidden; void __thr_pshared_atfork_pre(void) __hidden; void __thr_pshared_atfork_post(void) __hidden; void *__thr_calloc(size_t num, size_t size); void __thr_free(void *cp); void *__thr_malloc(size_t nbytes); void *__thr_realloc(void *cp, size_t nbytes); void __thr_malloc_init(void); void __thr_malloc_prefork(struct pthread *curthread); void __thr_malloc_postfork(struct pthread *curthread); int _thr_join(pthread_t, void **); int _Tthr_kill(pthread_t, int); int _thr_getthreadid_np(void); void __thr_cleanup_push_imp(void (*)(void *), void *, struct _pthread_cleanup_info *); void __thr_cleanup_pop_imp(int); void _thr_cleanup_push(void (*)(void *), void *); void _thr_cleanup_pop(int); void _Tthr_testcancel(void); void _Tthr_cancel_enter(int); void _Tthr_cancel_leave(int); int _thr_cancel(pthread_t); int _thr_atfork(void (*)(void), void (*)(void), void (*)(void)); int _thr_attr_destroy(pthread_attr_t *); int _thr_attr_get_np(pthread_t, pthread_attr_t *); int _thr_attr_getdetachstate(const pthread_attr_t *, int *); int _thr_attr_getguardsize(const pthread_attr_t * __restrict, size_t * __restrict); int _thr_attr_getinheritsched(const pthread_attr_t * __restrict, int * __restrict); int _thr_attr_getschedparam(const pthread_attr_t * __restrict, struct sched_param * __restrict); int _thr_attr_getschedpolicy(const pthread_attr_t * __restrict, int * __restrict); int _thr_attr_getscope(const pthread_attr_t * __restrict, int * __restrict); int _thr_attr_getstackaddr(const pthread_attr_t *, void **); int _thr_attr_getstacksize(const pthread_attr_t * __restrict, size_t * __restrict); int _thr_attr_init(pthread_attr_t *); int _thr_attr_setdetachstate(pthread_attr_t *, int); int _thr_attr_setguardsize(pthread_attr_t *, size_t); int _thr_attr_setinheritsched(pthread_attr_t *, int); int _thr_attr_setschedparam(pthread_attr_t * __restrict, const struct sched_param * __restrict); int _thr_attr_setschedpolicy(pthread_attr_t *, int); int _thr_attr_setscope(pthread_attr_t *, int); int _thr_attr_setstackaddr(pthread_attr_t *, void *); int _thr_attr_setstacksize(pthread_attr_t *, size_t); int _thr_cond_init(pthread_cond_t * __restrict, const pthread_condattr_t * __restrict); int _thr_cond_destroy(pthread_cond_t *); int _thr_cond_timedwait(pthread_cond_t * __restrict, pthread_mutex_t * __restrict, const struct timespec * __restrict); int 
_thr_cond_signal(pthread_cond_t * cond);
int	_thr_cond_broadcast(pthread_cond_t * cond);
int	__thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int	_thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int	_thr_detach(pthread_t);
int	_thr_equal(pthread_t, pthread_t);
void	_Tthr_exit(void *);
int	_thr_key_create(pthread_key_t *, void (*)(void *));
int	_thr_key_delete(pthread_key_t);
int	_thr_setspecific(pthread_key_t, const void *);
void	*_thr_getspecific(pthread_key_t);
int	_thr_setcancelstate(int, int *);
int	_thr_setcanceltype(int, int *);
pthread_t _Tthr_self(void);
int	_thr_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
int	_thr_rwlock_destroy(pthread_rwlock_t *);
int	_Tthr_rwlock_rdlock(pthread_rwlock_t *);
int	_Tthr_rwlock_tryrdlock(pthread_rwlock_t *);
int	_Tthr_rwlock_trywrlock(pthread_rwlock_t *);
int	_Tthr_rwlock_wrlock(pthread_rwlock_t *);
int	_Tthr_rwlock_unlock(pthread_rwlock_t *);
int	_thr_once(pthread_once_t *, void (*)(void));
int	_thr_sigmask(int, const sigset_t *, sigset_t *);
int	_thr_main_np(void);
int	_thr_mutexattr_init(pthread_mutexattr_t *);
int	_thr_mutexattr_destroy(pthread_mutexattr_t *);
int	_thr_mutexattr_settype(pthread_mutexattr_t *, int);
int	_thr_mutexattr_getrobust(pthread_mutexattr_t *, int *);
int	_thr_mutexattr_setrobust(pthread_mutexattr_t *, int);
int	__Tthr_mutex_init(pthread_mutex_t * __restrict,
	    const pthread_mutexattr_t * __restrict);
int	_Tthr_mutex_consistent(pthread_mutex_t *);
int	_thr_mutex_destroy(pthread_mutex_t *);
int	_thr_mutex_unlock(pthread_mutex_t *);
int	__Tthr_mutex_lock(pthread_mutex_t *);
int	__Tthr_mutex_trylock(pthread_mutex_t *);

__END_DECLS
__NULLABILITY_PRAGMA_POP

#endif  /* !_THR_PRIVATE_H */
diff --git a/lib/libthr/thread/thr_stack.c b/lib/libthr/thread/thr_stack.c
index b08bafdd9417..fe50bc76db5f 100644
--- a/lib/libthr/thread/thr_stack.c
+++ b/lib/libthr/thread/thr_stack.c
@@ -1,320 +1,322 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Daniel Eischen
 * Copyright (c) 2000-2001 Jason Evans
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _thr_stack_initial     | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                          (USRSTACK)
 * high memory
 */
static char *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
*/ static inline size_t round_up(size_t size) { if (size % _thr_page_size != 0) size = ((size / _thr_page_size) + 1) * _thr_page_size; return size; } void _thr_stack_fix_protection(struct pthread *thrd) { mprotect((char *)thrd->attr.stackaddr_attr + round_up(thrd->attr.guardsize_attr), round_up(thrd->attr.stacksize_attr), _rtld_get_stack_prot()); } static void singlethread_map_stacks_exec(void) { int mib[2]; struct rlimit rlim; - u_long usrstack; + u_long stacktop; size_t len; mib[0] = CTL_KERN; - mib[1] = KERN_USRSTACK; - len = sizeof(usrstack); - if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), &usrstack, &len, NULL, 0) - == -1) - return; + mib[1] = KERN_STACKTOP; + len = sizeof(stacktop); + if (sysctl(mib, nitems(mib), &stacktop, &len, NULL, 0) == -1) { + mib[1] = KERN_USRSTACK; + if (sysctl(mib, nitems(mib), &stacktop, &len, NULL, 0) == -1) + return; + } if (getrlimit(RLIMIT_STACK, &rlim) == -1) return; - mprotect((void *)(uintptr_t)(usrstack - rlim.rlim_cur), + mprotect((void *)(uintptr_t)(stacktop - rlim.rlim_cur), rlim.rlim_cur, _rtld_get_stack_prot()); } void __thr_map_stacks_exec(void) { struct pthread *curthread, *thrd; struct stack *st; if (!_thr_is_inited()) { singlethread_map_stacks_exec(); return; } curthread = _get_curthread(); THREAD_LIST_RDLOCK(curthread); LIST_FOREACH(st, &mstackq, qe) mprotect((char *)st->stackaddr + st->guardsize, st->stacksize, _rtld_get_stack_prot()); LIST_FOREACH(st, &dstackq, qe) mprotect((char *)st->stackaddr + st->guardsize, st->stacksize, _rtld_get_stack_prot()); TAILQ_FOREACH(thrd, &_thread_gc_list, gcle) _thr_stack_fix_protection(thrd); TAILQ_FOREACH(thrd, &_thread_list, tle) _thr_stack_fix_protection(thrd); THREAD_LIST_UNLOCK(curthread); } int _thr_stack_alloc(struct pthread_attr *attr) { struct pthread *curthread = _get_curthread(); struct stack *spare_stack; size_t stacksize; size_t guardsize; char *stackaddr; /* * Round up stack size to nearest multiple of _thr_page_size so * that mmap() * will work. If the stack size is not an even * multiple, we end up initializing things such that there is * unused space above the beginning of the stack, so the stack * sits snugly against its guard. */ stacksize = round_up(attr->stacksize_attr); guardsize = round_up(attr->guardsize_attr); attr->stackaddr_attr = NULL; attr->flags &= ~THR_STACK_USER; /* * Use the garbage collector lock for synchronization of the - * spare stack lists and allocations from usrstack. + * spare stack lists and allocations from stacktop. */ THREAD_LIST_WRLOCK(curthread); /* * If the stack and guard sizes are default, try to allocate a stack * from the default-size stack cache: */ if ((stacksize == THR_STACK_DEFAULT) && (guardsize == _thr_guard_default)) { if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) { /* Use the spare stack. */ LIST_REMOVE(spare_stack, qe); attr->stackaddr_attr = spare_stack->stackaddr; } } /* * The user specified a non-default stack and/or guard size, so try to * allocate a stack from the non-default size stack cache, using the * rounded up stack size (stack_size) in the search: */ else { LIST_FOREACH(spare_stack, &mstackq, qe) { if (spare_stack->stacksize == stacksize && spare_stack->guardsize == guardsize) { LIST_REMOVE(spare_stack, qe); attr->stackaddr_attr = spare_stack->stackaddr; break; } } } if (attr->stackaddr_attr != NULL) { /* A cached stack was found. Release the lock. 
*/ THREAD_LIST_UNLOCK(curthread); } else { /* - * Allocate a stack from or below usrstack, depending + * Allocate a stack from or below stacktop, depending * on the LIBPTHREAD_BIGSTACK_MAIN env variable. */ if (last_stack == NULL) - last_stack = _usrstack - _thr_stack_initial - + last_stack = _stacktop - _thr_stack_initial - _thr_guard_default; /* Allocate a new stack. */ stackaddr = last_stack - stacksize - guardsize; /* * Even if stack allocation fails, we don't want to try to * use this location again, so unconditionally decrement * last_stack. Under normal operating conditions, the most * likely reason for an mmap() error is a stack overflow of * the adjacent thread stack. */ last_stack -= (stacksize + guardsize); /* Release the lock before mmap'ing it. */ THREAD_LIST_UNLOCK(curthread); /* Map the stack and guard page together, and split guard page from allocated space: */ if ((stackaddr = mmap(stackaddr, stacksize + guardsize, _rtld_get_stack_prot(), MAP_STACK, -1, 0)) != MAP_FAILED && (guardsize == 0 || mprotect(stackaddr, guardsize, PROT_NONE) == 0)) { stackaddr += guardsize; } else { if (stackaddr != MAP_FAILED) munmap(stackaddr, stacksize + guardsize); stackaddr = NULL; } attr->stackaddr_attr = stackaddr; } if (attr->stackaddr_attr != NULL) return (0); else return (-1); } /* This function must be called with _thread_list_lock held. */ void _thr_stack_free(struct pthread_attr *attr) { struct stack *spare_stack; if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0) && (attr->stackaddr_attr != NULL)) { spare_stack = (struct stack *) ((char *)attr->stackaddr_attr + attr->stacksize_attr - sizeof(struct stack)); spare_stack->stacksize = round_up(attr->stacksize_attr); spare_stack->guardsize = round_up(attr->guardsize_attr); spare_stack->stackaddr = attr->stackaddr_attr; if (spare_stack->stacksize == THR_STACK_DEFAULT && spare_stack->guardsize == _thr_guard_default) { /* Default stack/guard size. */ LIST_INSERT_HEAD(&dstackq, spare_stack, qe); } else { /* Non-default stack/guard size. */ LIST_INSERT_HEAD(&mstackq, spare_stack, qe); } attr->stackaddr_attr = NULL; } }
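For readers following the thr_stack.c changes, the map-then-protect sequence in _thr_stack_alloc() above can be distilled into a standalone sketch (illustrative only; the helper name is hypothetical, and FreeBSD's MAP_STACK mmap flag is assumed):

/*
 * Standalone sketch (not part of the patch) of the guard scheme used by
 * _thr_stack_alloc(): map stack plus guard in one call, then turn the
 * low pages into an inaccessible red zone.
 */
#include <sys/mman.h>
#include <stddef.h>

static void *
alloc_guarded_stack(size_t stacksize, size_t guardsize)
{
	char *base;

	base = mmap(NULL, stacksize + guardsize, PROT_READ | PROT_WRITE,
	    MAP_STACK, -1, 0);
	if (base == MAP_FAILED)
		return (NULL);
	/* Accessing the guard now faults instead of corrupting memory. */
	if (guardsize != 0 && mprotect(base, guardsize, PROT_NONE) != 0) {
		munmap(base, stacksize + guardsize);
		return (NULL);
	}
	return (base + guardsize);	/* usable stack starts above guard */
}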