diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c index 7945ed921961..ac043367f874 100644 --- a/lib/libkse/thread/thr_init.c +++ b/lib/libkse/thread/thr_init.c @@ -1,526 +1,528 @@ /* * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); void _thread_init_hack(void); extern int _thread_state_running; static void init_private(void); static void init_main_thread(struct pthread *thread); /* * All weak references used within libc should be in this table. * This is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_thread_init_hack, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; #define DUAL_ENTRY(entry) \ (pthread_func_t)entry, (pthread_func_t)entry static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(_pthread_atfork)}, /* PJT_ATFORK */ {DUAL_ENTRY(_pthread_attr_destroy)}, /* PJT_ATTR_DESTROY */ {DUAL_ENTRY(_pthread_attr_getdetachstate)}, /* PJT_ATTR_GETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_getguardsize)}, /* PJT_ATTR_GETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_getinheritsched)}, /* PJT_ATTR_GETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_getschedparam)}, /* PJT_ATTR_GETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_getschedpolicy)}, /* PJT_ATTR_GETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_getscope)}, /* PJT_ATTR_GETSCOPE */ {DUAL_ENTRY(_pthread_attr_getstackaddr)}, /* PJT_ATTR_GETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_getstacksize)}, /* PJT_ATTR_GETSTACKSIZE */ {DUAL_ENTRY(_pthread_attr_init)}, /* PJT_ATTR_INIT */ {DUAL_ENTRY(_pthread_attr_setdetachstate)}, /* PJT_ATTR_SETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_setguardsize)}, /* PJT_ATTR_SETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_setinheritsched)}, /* PJT_ATTR_SETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_setschedparam)}, /* PJT_ATTR_SETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_setschedpolicy)}, /* PJT_ATTR_SETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_setscope)}, /* PJT_ATTR_SETSCOPE */ {DUAL_ENTRY(_pthread_attr_setstackaddr)}, /* PJT_ATTR_SETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_setstacksize)}, /* PJT_ATTR_SETSTACKSIZE */ {DUAL_ENTRY(_pthread_cancel)}, /* PJT_CANCEL */ {DUAL_ENTRY(_pthread_cleanup_pop)}, /* PJT_CLEANUP_POP */ {DUAL_ENTRY(_pthread_cleanup_push)}, /* PJT_CLEANUP_PUSH */ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */ 
{DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */ {DUAL_ENTRY(_pthread_cond_timedwait)}, /* PJT_COND_TIMEDWAIT */ {(pthread_func_t)__pthread_cond_wait, (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */ {DUAL_ENTRY(_pthread_detach)}, /* PJT_DETACH */ {DUAL_ENTRY(_pthread_equal)}, /* PJT_EQUAL */ {DUAL_ENTRY(_pthread_exit)}, /* PJT_EXIT */ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */ {DUAL_ENTRY(_pthread_join)}, /* PJT_JOIN */ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/ {DUAL_ENTRY(_pthread_kill)}, /* PJT_KILL */ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ {(pthread_func_t)__pthread_mutex_lock, (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ {(pthread_func_t)__pthread_mutex_trylock, (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */ {DUAL_ENTRY(_pthread_setcancelstate)}, /* PJT_SETCANCELSTATE */ {DUAL_ENTRY(_pthread_setcanceltype)}, /* PJT_SETCANCELTYPE */ {DUAL_ENTRY(_pthread_setspecific)}, /* 
PJT_SETSPECIFIC */ {DUAL_ENTRY(_pthread_sigmask)}, /* PJT_SIGMASK */ {DUAL_ENTRY(_pthread_testcancel)} /* PJT_TESTCANCEL */ }; static int init_once = 0; /* * Threaded process initialization. * * This is only called under two conditions: * * 1) Some thread routines have detected that the library hasn't yet * been initialized (_thr_initial == NULL && curthread == NULL), or * * 2) An explicit call to reinitialize after a fork (indicated * by curthread != NULL) */ void _libpthread_init(struct pthread *curthread) { int fd; /* Check if this function has already been called: */ if ((_thr_initial != NULL) && (curthread == NULL)) /* Only initialize the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* Pull debug symbols in for static binary */ _thread_state_running = PS_RUNNING; /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. */ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if ((_thr_pid = getpid()) == 1) { /* * Setup a new session for this process which is * assumed to be running as root. */ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); } /* Initialize pthread private data. */ init_private(); _kse_init(); /* Initialize the initial kse and kseg. 
*/ _kse_initial = _kse_alloc(NULL, _thread_scope_system > 0); if (_kse_initial == NULL) PANIC("Can't allocate initial kse."); _kse_initial->k_kseg = _kseg_alloc(NULL); if (_kse_initial->k_kseg == NULL) PANIC("Can't allocate initial kseg."); _kse_initial->k_kseg->kg_flags |= KGF_SINGLE_THREAD; _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq; TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe); _kse_initial->k_kseg->kg_ksecount = 1; /* Set the initial thread. */ if (curthread == NULL) { /* Create and initialize the initial thread. */ curthread = _thr_alloc(NULL); if (curthread == NULL) PANIC("Can't allocate initial thread"); _thr_initial = curthread; init_main_thread(curthread); } else { /* * The initial thread is the current thread. It is * assumed that the current thread is already initialized * because it is left over from a fork(). */ _thr_initial = curthread; } _kse_initial->k_kseg->kg_threadcount = 0; _thr_initial->kse = _kse_initial; _thr_initial->kseg = _kse_initial->k_kseg; _thr_initial->active = 1; /* * Add the thread to the thread list and to the KSEG's thread * queue. */ THR_LIST_ADD(_thr_initial); KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ _thr_initial->kse->k_curthread = _thr_initial; _kcb_set(_thr_initial->kse->k_kcb); _tcb_set(_thr_initial->kse->k_kcb, _thr_initial->tcb); _thr_initial->kse->k_flags |= KF_INITIALIZED; _thr_signal_init(); _kse_critical_leave(&_thr_initial->tcb->tcb_tmbx); /* * activate threaded mode as soon as possible if we are * being debugged */ if (_libkse_debug) _kse_setthreaded(1); } /* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; thread->attr.flags |= PTHREAD_SCOPE_SYSTEM; /* * Set up the thread stack. 
* * Create a red zone below the main stack. All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap((void *)_usrstack - _thr_stack_initial - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. */ thread->attr.stackaddr_attr = (void *)_usrstack - _thr_stack_initial; thread->attr.stacksize_attr = _thr_stack_initial; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->slice_usec = -1; thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; thread->name = strdup("initial thread"); /* Initialize the thread for signals: */ SIGEMPTYSET(thread->sigmask); /* * Set up the thread mailbox. The threads saved context * is also in the mailbox. 
*/ thread->tcb->tcb_tmbx.tm_udata = thread; thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; thread->active_priority = THR_DEFAULT_PRIORITY; thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); /* Initialize hooks in the thread structure: */ thread->specific = NULL; thread->cleanup = NULL; thread->flags = 0; thread->sigbackout = NULL; thread->continuation = NULL; thread->state = PS_RUNNING; thread->uniqueid = 0; } static void init_private(void) { struct clockinfo clockinfo; size_t len; int mib[2]; /* * Avoid reinitializing some things if they don't need to be, * e.g. after a fork(). */ if (init_once == 0) { /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) PANIC("Cannot get kern.usrstack from sysctl"); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = 1000000 / clockinfo.stathz; else _clock_res_usec = CLOCK_RES_USEC; _thr_page_size = getpagesize(); _thr_guard_default = _thr_page_size; if (sizeof(void *) == 8) { _thr_stack_default = THR_STACK64_DEFAULT; _thr_stack_initial = THR_STACK64_INITIAL; } else { _thr_stack_default = THR_STACK32_DEFAULT; _thr_stack_initial = THR_STACK32_INITIAL; } _pthread_attr_default.guardsize_attr = _thr_guard_default; _pthread_attr_default.stacksize_attr = _thr_stack_default; TAILQ_INIT(&_thr_atfork_list); init_once = 1; /* Don't do this again. */ } else { /* * Destroy the locks before creating them. We don't * know what state they are in so it is better to just * recreate them. 
*/ _lock_destroy(&_thread_signal_lock); _lock_destroy(&_mutex_static_lock); _lock_destroy(&_rwlock_static_lock); _lock_destroy(&_keytable_lock); } /* Initialize everything else. */ TAILQ_INIT(&_thread_list); TAILQ_INIT(&_thread_gc_list); _pthread_mutex_init(&_thr_atfork_mutex, NULL); /* * Initialize the lock for temporary installation of signal * handlers (to support sigwait() semantics) and for the * process signal mask and pending signal sets. */ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Cannot initialize _thread_signal_lock"); if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize mutex static init lock"); if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize rwlock static init lock"); if (_lock_init(&_keytable_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread specific keytable lock"); _thr_spinlock_init(); /* Clear pending signals and get the process signal mask. */ SIGEMPTYSET(_thr_proc_sigpending); /* Are we in M:N mode (default) or 1:1 mode? */ #ifdef SYSTEM_SCOPE_ONLY _thread_scope_system = 1; #else if (getenv("LIBPTHREAD_SYSTEM_SCOPE") != NULL) _thread_scope_system = 1; else if (getenv("LIBPTHREAD_PROCESS_SCOPE") != NULL) _thread_scope_system = -1; #endif + if (getenv("LIBPTHREAD_DEBUG") != NULL) + _thr_debug_flags |= DBG_INFO_DUMP; /* * _thread_list_lock and _kse_count are initialized * by _kse_init() */ } diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index e2055bc1da64..9d8ee632fd00 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -1,1281 +1,1282 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef LIBTHREAD_DB #include "lock.h" #include "pthread_md.h" #endif /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #define SCLASS_PRESET(x...) = x #else #define SCLASS extern #define SCLASS_PRESET(x...) 
#endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thr_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define DBG_MUTEX 0x0001 #define DBG_SIG 0x0002 +#define DBG_INFO_DUMP 0x0004 #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif /* * State change macro without scheduling queue change: */ #define THR_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. */ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; int pq_threads; } pq_queue_t; /* * Each KSEG has a scheduling queue. For now, threads that exist in their * own KSEG (system scope) will get a full priority queue. 
In the future * this can be optimized for the single thread per KSEG case. */ struct sched_queue { pq_queue_t sq_runq; TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ }; typedef struct kse_thr_mailbox *kse_critical_t; struct kse_group; #define MAX_KSE_LOCKLEVEL 5 struct kse { /* -- location and order specific items for gdb -- */ struct kcb *k_kcb; struct pthread *k_curthread; /* current thread */ struct kse_group *k_kseg; /* parent KSEG */ struct sched_queue *k_schedq; /* scheduling queue */ /* -- end of location and order specific items -- */ TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ /* * Items that are only modified by the kse, or that otherwise * don't need to be locked when accessed */ struct lock k_lock; struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; int k_locklevel; stack_t k_stack; int k_flags; #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ #define KF_TERMINATED 0x0004 /* kse is terminated */ #define KF_IDLE 0x0008 /* kse is idle */ #define KF_SWITCH 0x0010 /* thread switch in UTS */ int k_error; /* syscall errno in critical */ int k_cpu; /* CPU ID when bound */ int k_sigseqno; /* signal buffered count */ }; #define KSE_SET_IDLE(kse) ((kse)->k_flags |= KF_IDLE) #define KSE_CLEAR_IDLE(kse) ((kse)->k_flags &= ~KF_IDLE) #define KSE_IS_IDLE(kse) (((kse)->k_flags & KF_IDLE) != 0) #define KSE_SET_SWITCH(kse) ((kse)->k_flags |= KF_SWITCH) #define KSE_CLEAR_SWITCH(kse) ((kse)->k_flags &= ~KF_SWITCH) #define KSE_IS_SWITCH(kse) (((kse)->k_flags & KF_SWITCH) != 0) /* * Each KSE group contains one or more KSEs in which threads can run. * At least for now, there is one scheduling queue per KSE group; KSEs * within the same KSE group compete for threads from the same scheduling * queue. A scope system thread has one KSE in one KSE group; the group * does not use its scheduling queue. 
*/ struct kse_group { TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ struct sched_queue kg_schedq; /* scheduling queue */ struct lock kg_lock; int kg_threadcount; /* # of assigned threads */ int kg_ksecount; /* # of assigned KSEs */ int kg_idle_kses; int kg_flags; #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ }; /* * Add/remove threads from a KSE's scheduling queue. * For now the scheduling queue is hung off the KSEG. */ #define KSEG_THRQ_ADD(kseg, thr) \ do { \ TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ (kseg)->kg_threadcount++; \ } while (0) #define KSEG_THRQ_REMOVE(kseg, thr) \ do { \ TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ (kseg)->kg_threadcount--; \ } while (0) /* * Lock acquire and release for KSEs. */ #define KSE_LOCK_ACQUIRE(kse, lck) \ do { \ if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) { \ (kse)->k_locklevel++; \ _lock_acquire((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ } \ else \ PANIC("Exceeded maximum lock level"); \ } while (0) #define KSE_LOCK_RELEASE(kse, lck) \ do { \ if ((kse)->k_locklevel > 0) { \ _lock_release((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ (kse)->k_locklevel--; \ } \ } while (0) /* * Lock our own KSEG. */ #define KSE_LOCK(curkse) \ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) #define KSE_UNLOCK(curkse) \ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) /* * Lock a potentially different KSEG. 
*/ #define KSE_SCHED_LOCK(curkse, kseg) \ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) #define KSE_SCHED_UNLOCK(curkse, kseg) \ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) /* * Waiting queue manipulation macros (using pqe link): */ #define KSE_WAITQ_REMOVE(kse, thrd) \ do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ } \ } while (0) #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx) /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * lock initialization values. */ #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } struct pthread_mutex { /* * Lock for accesses to this structure. */ struct lock m_lock; enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; long m_flags; int m_count; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { /* * Lock for accesses to this structure. */ struct lock c_lock; enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; struct pthread_mutex *c_mutex; long c_flags; long c_seqno; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; struct pthread_barrier { pthread_mutex_t b_lock; pthread_cond_t b_cond; int b_count; int b_waiters; int b_generation; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { volatile int s_lock; pthread_t s_owner; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ NULL, NULL, 0, 0 } /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; int onstack; }; #define THR_CLEANUP_PUSH(td, func, arg) { \ struct pthread_cleanup __cup; \ \ __cup.routine = func; \ __cup.routine_arg = arg; \ __cup.onstack = 1; \ __cup.next = (td)->cleanup; \ (td)->cleanup = &__cup; #define THR_CLEANUP_POP(td, exec) \ (td)->cleanup = __cup.next; \ if ((exec) != 0) \ __cup.routine(__cup.routine_arg); \ } struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); void (*parent)(void); void (*child)(void); }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ #define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */ int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK32_DEFAULT (1 * 1024 * 1024) #define THR_STACK64_DEFAULT (2 * 1024 * 1024) /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK32_INITIAL (2 * 1024 * 1024) #define THR_STACK64_INITIAL (4 * 1024 * 1024) /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. 
* The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define THR_DEFAULT_PRIORITY 15 #define THR_MIN_PRIORITY 0 #define THR_MAX_PRIORITY 31 /* 0x1F */ #define THR_SIGNAL_PRIORITY 32 /* 0x20 */ #define THR_RT_PRIORITY 64 /* 0x40 */ #define THR_FIRST_PRIORITY THR_MIN_PRIORITY #define THR_LAST_PRIORITY \ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * XXX - Define a thread-safe macro to get the current time of day * which is updated at regular intervals by something. * * For now, we just make the system call to get the time. */ #define KSE_GET_TOD(curkse, tsp) \ do { \ *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \ if ((tsp)->tv_sec == 0) \ clock_gettime(CLOCK_REALTIME, tsp); \ } while (0) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ pthread_cond_t read_signal; pthread_cond_t write_signal; int state; /* 0 = idle >0 = # of readers -1 = writer */ int blocked_writers; }; /* * Thread states. 
*/ enum pthread_state { PS_RUNNING, PS_LOCKWAIT, PS_MUTEX_WAIT, PS_COND_WAIT, PS_SLEEP_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; struct sigwait_data { sigset_t *waitset; siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */ }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; struct lock *lock; struct sigwait_data *sigwait; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); /* * This stores a thread's state prior to running a signal handler. * It is used when a signal is delivered to a thread blocked in * userland. If the signal handler returns normally, the thread's * state is restored from here. */ struct pthread_sigframe { int psf_valid; int psf_flags; int psf_cancelflags; int psf_interrupted; int psf_timeout; int psf_signo; enum pthread_state psf_state; union pthread_wait_data psf_wait_data; struct timespec psf_wakeup_time; sigset_t psf_sigset; sigset_t psf_sigmask; int psf_seqno; thread_continuation_t psf_continuation; }; struct join_status { struct pthread *thread; void *ret; int error; }; struct pthread_specific_elem { const void *data; int seqno; }; struct pthread_key { volatile int allocated; volatile int count; int seqno; void (*destructor) (void *); }; #define MAX_THR_LOCKLEVEL 5 /* * Thread structure. */ struct pthread { /* Thread control block */ struct tcb *tcb; /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ /* Queue entry for GC lists: */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry */ LIST_ENTRY(pthread) hle; /* * Lock for accesses to this thread structure. 
*/ struct lock lock; struct lockuser lockusers[MAX_THR_LOCKLEVEL]; int locklevel; kse_critical_t critical[MAX_KSE_LOCKLEVEL]; struct kse *kse; struct kse_group *kseg; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; int active; /* thread running */ int blocked; /* thread blocked in kernel */ int need_switchout; /* * Used for tracking delivery of signal handlers. */ siginfo_t *siginfo; thread_continuation_t sigbackout; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define THR_AT_CANCEL_POINT 0x0004 #define THR_CANCELLING 0x0008 #define THR_CANCEL_NEEDED 0x0010 int cancelflags; thread_continuation_t continuation; /* * The thread's base and pending signal masks. The active * signal mask is stored in the thread's context (in mailbox). */ sigset_t sigmask; sigset_t sigpend; sigset_t *oldsigmask; volatile int check_pending; int refcount; /* Thread state: */ enum pthread_state state; volatile int lock_switch; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). 
It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex or condition variable queue when it is not * being called from a signal handler. If a thread is a member * of one of these queues when a signal handler is invoked, it * must be removed from the queue before invoking the handler * and then added back to the queue after return from the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ /* Wait data. */ union pthread_wait_data data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* * Set to TRUE if this thread should yield after leaving a * critical region to check for signals, messages, etc. */ int critical_yield; int sflags; #define THR_FLAGS_IN_SYNCQ 0x0001 /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ /* Thread list flags; only set with thread list lock held. 
*/ #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ int tlflags; /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* Number rwlocks rdlocks held. */ int rdlock_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Alternative stack for sigaltstack() */ stack_t sigstk; /* * Current locks bitmap for rtld. */ int rtld_bits; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Critical regions can also be detected by looking at the threads * current lock level. Ensure these macros increment and decrement * the lock levels such that locks can not be held with a lock level * of 0. 
*/ #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_YIELD_CHECK(thrd) \ do { \ if (!THR_IN_CRITICAL(thrd)) { \ if (__predict_false(_libkse_debug)) \ _thr_debug_check_yield(thrd); \ if ((thrd)->critical_yield != 0) \ _thr_sched_switch(thrd); \ if ((thrd)->check_pending != 0) \ _thr_sig_check_pending(thrd); \ } \ } while (0) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \ THR_DEACTIVATE_LAST_LOCK(thrd); \ (thrd)->locklevel++; \ _lock_acquire((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1], \ (thrd)->active_priority); \ } else \ PANIC("Exceeded maximum lock level"); \ } while (0) #define THR_LOCK_RELEASE(thrd, lck) \ do { \ if ((thrd)->locklevel > 0) { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ THR_ACTIVATE_LAST_LOCK(thrd); \ if ((thrd)->locklevel == 0) \ THR_YIELD_CHECK(thrd); \ } \ } while (0) #define THR_ACTIVATE_LAST_LOCK(thrd) \ do { \ if ((thrd)->locklevel > 0) \ _lockuser_setactive( \ &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \ } while (0) #define THR_DEACTIVATE_LAST_LOCK(thrd) \ do { \ if ((thrd)->locklevel > 0) \ _lockuser_setactive( \ &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \ } while (0) /* * For now, threads will have their own lock separate from their * KSE scheduling lock. */ #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) /* * Priority queue manipulation macros (using pqe link). We use * the thread's kseg link instead of the kse link because a thread * does not (currently) have a statically assigned kse. 
*/ #define THR_RUNQ_INSERT_HEAD(thrd) \ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_INSERT_TAIL(thrd) \ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_REMOVE(thrd) \ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) /* * Locking the scheduling queue for another thread uses that thread's * KSEG lock. */ #define THR_SCHED_LOCK(curthr, thr) do { \ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ (curthr)->locklevel++; \ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ } while (0) #define THR_SCHED_UNLOCK(curthr, thr) do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ } while (0) /* Take the scheduling lock with the intent to call the scheduler. 
 */
/*
 * Enter a critical region and take the current thread's scheduling
 * queue lock in preparation for calling the scheduler.  Note the
 * kse_critical_t returned by _kse_critical_enter() is discarded;
 * the scheduler exit path is expected to leave the critical region.
 */
#define THR_LOCK_SWITCH(curthr) do { \
	(void)_kse_critical_enter(); \
	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
} while (0)

/*
 * Drop the scheduling queue lock taken by THR_LOCK_SWITCH.  The
 * critical region is intentionally NOT left here (asymmetric with
 * THR_LOCK_SWITCH above).
 */
#define THR_UNLOCK_SWITCH(curthr) do { \
	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
} while (0)

/* Bump the thread's critical-region nesting count (recursive entry OK). */
#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++

/*
 * Leave a critical region; when leaving the outermost region with a
 * pending yield request, clear the request and enter the scheduler.
 */
#define THR_CRITICAL_LEAVE(thr) do { \
	(thr)->critical_count--; \
	if (((thr)->critical_yield != 0) && \
	    ((thr)->critical_count == 0)) { \
		(thr)->critical_yield = 0; \
		_thr_sched_switch(thr); \
	} \
} while (0)

/*
 * TRUE if the thread is the one currently running on its KSE.
 * NOTE(review): the expansion is not wrapped in outer parentheses;
 * use with care inside larger expressions.
 */
#define THR_IS_ACTIVE(thrd) \
	((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd))

/* TRUE if the thread is queued on a mutex/condvar/join (sqe) queue. */
#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

/* TRUE if the thread is suspended or has a suspension pending. */
#define THR_IS_SUSPENDED(thrd) \
	(((thrd)->state == PS_SUSPENDED) || \
	(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))

/* TRUE if the thread has started exiting. */
#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)

/* Debugger helper: TRUE unless the debugger has suspended the thread. */
#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
	TMDF_SUSPEND) == 0)

/* Set non-zero by libc once the process goes multi-threaded. */
extern int __isthreaded;

/* TRUE once the process has become threaded. */
static inline int
_kse_isthreaded(void)
{
	return (__isthreaded != 0);
}

/*
 * Global variables for the pthread kernel.
*/ SCLASS void *_usrstack SCLASS_PRESET(NULL); SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); /* For debugger */ SCLASS int _libkse_debug SCLASS_PRESET(0); SCLASS int _thread_activated SCLASS_PRESET(0); SCLASS int _thread_scope_system SCLASS_PRESET(0); /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); /* List of threads needing GC: */ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); SCLASS int _thread_active_threads SCLASS_PRESET(1); SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list; SCLASS pthread_mutex_t _thr_atfork_mutex; /* Default thread attributes: */ SCLASS struct pthread_attr _pthread_attr_default SCLASS_PRESET({ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, /* stacksize */0, /* guardsize */0 }); /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr _pthread_mutexattr_default SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr _pthread_condattr_default SCLASS_PRESET({COND_TYPE_FAST, 0}); /* Clock resolution in usec. */ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); /* Array of signal actions for this process: */ SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG]; /* * Lock for above count of dummy handlers and for the process signal * mask and pending signal sets. */ SCLASS struct lock _thread_signal_lock; /* Pending signals and mask for this process: */ SCLASS sigset_t _thr_proc_sigpending; SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG]; SCLASS pid_t _thr_pid SCLASS_PRESET(0); /* Garbage collector lock. 
*/ SCLASS struct lock _gc_lock; SCLASS int _gc_check SCLASS_PRESET(0); SCLASS int _gc_count SCLASS_PRESET(0); SCLASS struct lock _mutex_static_lock; SCLASS struct lock _rwlock_static_lock; SCLASS struct lock _keytable_lock; SCLASS struct lock _thread_list_lock; SCLASS int _thr_guard_default; SCLASS int _thr_stack_default; SCLASS int _thr_stack_initial; SCLASS int _thr_page_size; SCLASS pthread_t _thr_sig_daemon; SCLASS int _thr_debug_flags SCLASS_PRESET(0); /* Undefine the storage class and preset specifiers: */ #undef SCLASS #undef SCLASS_PRESET /* * Function prototype definitions. */ __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); struct kse *_kse_alloc(struct pthread *, int sys_scope); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); int _kse_in_critical(void); void _kse_free(struct pthread *, struct kse *); void _kse_init(); struct kse_group *_kseg_alloc(struct pthread *); void _kse_lock_wait(struct lock *, struct lockuser *lu); void _kse_lock_wakeup(struct lock *, struct lockuser *lu); void _kse_single_thread(struct pthread *); int _kse_setthreaded(int); void _kseg_free(struct kse_group *); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_notify_priochange(struct pthread *, struct pthread *, int); int _mutex_reinit(struct pthread_mutex *); void _mutex_unlock_private(struct pthread *); void _libpthread_init(struct pthread *); int _pq_alloc(struct pq_queue *, int, int); void _pq_free(struct pq_queue *); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); struct pthread *_pq_first_debug(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int 
_pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *); int _pthread_rwlock_destroy (pthread_rwlock_t *); struct pthread *_pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _pthread_yield(void); void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg); void _pthread_cleanup_pop(int execute); struct pthread *_thr_alloc(struct pthread *); void _thr_exit(char *, int, char *); void _thr_exit_cleanup(void); void _thr_lock_wait(struct lock *lock, struct lockuser *lu); void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); void _thr_mutex_reinit(pthread_mutex_t *); int _thr_ref_add(struct pthread *, struct pthread *, int); void _thr_ref_delete(struct pthread *, struct pthread *); void _thr_rtld_init(void); void _thr_rtld_fini(void); int _thr_schedule_add(struct pthread *, struct pthread *); void _thr_schedule_remove(struct pthread *, struct pthread *); void _thr_setrunnable(struct pthread *curthread, struct pthread *thread); struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread); struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *); void _thr_sig_dispatch(struct kse *, int, siginfo_t *); int _thr_stack_alloc(struct pthread_attr *); void _thr_stack_free(struct pthread_attr *); void _thr_exit_cleanup(void); void _thr_free(struct pthread *, struct pthread *); void _thr_gc(struct pthread *); void _thr_panic_exit(char *, int, char *); void _thread_cleanupspecific(void); void _thread_dump_info(void); void 
_thread_printf(int, const char *, ...);
void _thr_sched_switch(struct pthread *);
void _thr_sched_switch_unlocked(struct pthread *);
void _thr_set_timeout(const struct timespec *);
void _thr_seterrno(struct pthread *, int);
void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
void _thr_sig_check_pending(struct pthread *);
void _thr_sig_rundown(struct pthread *, ucontext_t *);
void _thr_sig_send(struct pthread *pthread, int sig);
void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
void _thr_spinlock_init(void);
void _thr_cancel_enter(struct pthread *);
void _thr_cancel_leave(struct pthread *, int);
int _thr_setconcurrency(int new_level);
int _thr_setmaxconcurrency(void);
void _thr_critical_enter(struct pthread *);
void _thr_critical_leave(struct pthread *);
int _thr_start_sig_daemon(void);
int _thr_getprocsig(int sig, siginfo_t *siginfo);
int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
void _thr_signal_init(void);
void _thr_signal_deinit(void);
void _thr_hash_add(struct pthread *);
void _thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void _thr_finish_cancellation(void *arg);
int _thr_sigonstack(void *sp);
void _thr_debug_check_yield(struct pthread *);

/*
 * Aliases for _pthread functions. Should be called instead of
 * originals if PLT relocation is unwanted at runtime.
*/ int _thr_cond_broadcast(pthread_cond_t *); int _thr_cond_signal(pthread_cond_t *); int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *); int _thr_mutex_lock(pthread_mutex_t *); int _thr_mutex_unlock(pthread_mutex_t *); int _thr_rwlock_rdlock (pthread_rwlock_t *); int _thr_rwlock_wrlock (pthread_rwlock_t *); int _thr_rwlock_unlock (pthread_rwlock_t *); /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #inclde */ #ifdef _SCHED_H_ int __sys_sched_yield(void); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, socklen_t *); int __sys_connect(int, const struct sockaddr *, socklen_t); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fork(void); int __sys_fsync(int); pid_t __sys_getpid(void); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); int 
__sys_sigwait(const sigset_t *, int *); int __sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif #endif /* !_THR_PRIVATE_H */ diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c index ec6ebd51eadd..f53b87f08b17 100644 --- a/lib/libkse/thread/thr_sig.c +++ b/lib/libkse/thread/thr_sig.c @@ -1,1250 +1,1259 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "thr_private.h" /* Prototypes: */ static inline void build_siginfo(siginfo_t *info, int signo); #ifndef SYSTEM_SCOPE_ONLY static struct pthread *thr_sig_find(struct kse *curkse, int sig, siginfo_t *info); #endif static inline void thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); static inline void thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf); #define SA_KILL 0x01 /* terminates process by default */ #define SA_STOP 0x02 #define SA_CONT 0x04 static int sigproptbl[NSIG] = { SA_KILL, /* SIGHUP */ SA_KILL, /* SIGINT */ SA_KILL, /* SIGQUIT */ SA_KILL, /* SIGILL */ SA_KILL, /* SIGTRAP */ SA_KILL, /* SIGABRT */ SA_KILL, /* SIGEMT */ SA_KILL, /* SIGFPE */ SA_KILL, /* SIGKILL */ SA_KILL, /* SIGBUS */ SA_KILL, /* SIGSEGV */ SA_KILL, /* SIGSYS */ SA_KILL, /* SIGPIPE */ SA_KILL, /* SIGALRM */ SA_KILL, /* SIGTERM */ 0, /* SIGURG */ SA_STOP, /* SIGSTOP */ SA_STOP, /* SIGTSTP */ SA_CONT, /* SIGCONT */ 0, /* SIGCHLD */ SA_STOP, /* SIGTTIN */ SA_STOP, /* SIGTTOU */ 0, /* SIGIO */ SA_KILL, /* SIGXCPU */ SA_KILL, /* SIGXFSZ */ SA_KILL, /* SIGVTALRM */ SA_KILL, /* SIGPROF */ 0, /* SIGWINCH */ 0, /* SIGINFO */ SA_KILL, /* SIGUSR1 */ SA_KILL /* SIGUSR2 */ }; /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif +static __inline int +_thr_dump_enabled(void) +{ + return ((_thr_debug_flags & DBG_INFO_DUMP) != 0); +} + /* * Signal setup and delivery. * * 1) Delivering signals to threads in the same KSE. * These signals are sent by upcall events and are set in the * km_sigscaught field of the KSE mailbox. Since these signals * are received while operating on the KSE stack, they can be * delivered either by using signalcontext() to add a stack frame * to the target thread's stack, or by adding them in the thread's * pending set and having the thread run them down after it * 2) Delivering signals to threads in other KSEs/KSEGs. * 3) Delivering signals to threads in critical regions. * 4) Delivering signals to threads after they change their signal masks. * * Methods of delivering signals. * * 1) Add a signal frame to the thread's saved context. * 2) Add the signal to the thread structure, mark the thread as * having signals to handle, and let the thread run them down * after it resumes from the KSE scheduler. * * Problem with 1). You can't do this to a running thread or a * thread in a critical region. * * Problem with 2). You can't do this to a thread that doesn't * yield in some way (explicitly enters the scheduler). A thread * blocked in the kernel or a CPU hungry thread will not see the * signal without entering the scheduler. * * The solution is to use both 1) and 2) to deliver signals: * * o Thread in critical region - use 2). When the thread * leaves the critical region it will check to see if it * has pending signals and run them down. * * o Thread enters scheduler explicitly - use 2). The thread * can check for pending signals after it returns from the * the scheduler. * * o Thread is running and not current thread - use 2). When the * thread hits a condition specified by one of the other bullets, * the signal will be delivered. 
 *
 * o Thread is running and is current thread (e.g., the thread
 *   has just changed its signal mask and now sees that it has
 *   pending signals) - just run down the pending signals.
 *
 * o Thread is swapped out due to quantum expiration - use 1)
 *
 * o Thread is blocked in kernel - kse_thr_wakeup() and then
 *   use 1)
 */

/*
 * Rules for selecting threads for signals received:
 *
 * 1) If the signal is a synchronous signal, it is delivered to
 *    the generating (current) thread.  If the thread has the
 *    signal masked, it is added to the thread's pending signal
 *    set until the thread unmasks it.
 *
 * 2) A thread in sigwait() where the signal is in the thread's
 *    waitset.
 *
 * 3) A thread in sigsuspend() where the signal is not in the
 *    thread's suspended signal mask.
 *
 * 4) Any thread (first found/easiest to deliver) that has the
 *    signal unmasked.
 */

#ifndef SYSTEM_SCOPE_ONLY
/*
 * Process-wide signal daemon.  Runs as a bound thread; it repeatedly
 * publishes the set of signals caught for the process and hands each
 * one to _thr_sig_dispatch() for delivery to a suitable user thread,
 * then sleeps in the kernel until more signal events arrive.  Never
 * returns in normal operation (the trailing return quiets the
 * compiler).
 */
static void *
sig_daemon(void *arg /* Unused */)
{
	int i;
	kse_critical_t crit;
	struct timespec ts;
	sigset_t set;
	struct kse *curkse;
	struct pthread *curthread = _get_curthread();

	DBG_MSG("signal daemon started(%p)\n", curthread);

	curthread->name = strdup("signal thread");
	crit = _kse_critical_enter();
	curkse = _get_curkse();

	/*
	 * Daemon thread is a bound thread and we must be created with
	 * all signals masked
	 */
#if 0
	SIGFILLSET(set);
	__sys_sigprocmask(SIG_SETMASK, &set, NULL);
#endif
	/* Seed the working set with whatever is already pending. */
	__sys_sigpending(&set);
	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	while (1) {
		/* Publish the harvested set as the process-pending set. */
		KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
		_thr_proc_sigpending = set;
		KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
		/* Dispatch every signal present in the set. */
		for (i = 1; i <= _SIG_MAXSIG; i++) {
			if (SIGISMEMBER(set, i) != 0)
				_thr_sig_dispatch(curkse, i,
				    NULL /* no siginfo */);
		}
		/*
		 * Wait in the kernel for new signal events; ts bounds
		 * the wait to 30 seconds.
		 */
		ts.tv_sec = 30;
		ts.tv_nsec = 0;
		curkse->k_kcb->kcb_kmbx.km_flags =
		    KMF_NOUPCALL | KMF_NOCOMPLETED | KMF_WAITSIGEVENT;
		kse_release(&ts);
		curkse->k_kcb->kcb_kmbx.km_flags = 0;
		/* Pick up the signals caught while we were released. */
		set = curkse->k_kcb->kcb_kmbx.km_sigscaught;
	}
	return (0);
}

/* Utility function to create signal daemon thread */
int
_thr_start_sig_daemon(void)
{
pthread_attr_t attr; sigset_t sigset, oldset; SIGFILLSET(sigset); pthread_sigmask(SIG_SETMASK, &sigset, &oldset); pthread_attr_init(&attr); pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); attr->flags |= THR_SIGNAL_THREAD; /* sigmask will be inherited */ if (pthread_create(&_thr_sig_daemon, &attr, sig_daemon, NULL)) PANIC("can not create signal daemon thread!\n"); pthread_attr_destroy(&attr); pthread_sigmask(SIG_SETMASK, &oldset, NULL); return (0); } /* * This signal handler only delivers asynchronous signals. * This must be called with upcalls disabled and without * holding any locks. */ void _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info) { struct kse_mailbox *kmbx; struct pthread *thread; DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig); /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) { + if (_thr_dump_enabled() && (sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } while ((thread = thr_sig_find(curkse, sig, info)) != NULL) { /* * Setup the target thread to receive the signal: */ DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread); KSE_SCHED_LOCK(curkse, thread->kseg); if ((thread->state == PS_DEAD) || (thread->state == PS_DEADLOCK) || THR_IS_EXITING(thread) || THR_IS_SUSPENDED(thread)) { KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); } else if (SIGISMEMBER(thread->sigmask, sig)) { KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); } else { kmbx = _thr_sig_add(thread, sig, info); KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); if (kmbx != NULL) kse_wakeup(kmbx); break; } } DBG_MSG("<<< _thr_sig_dispatch\n"); } #endif /* ! 
SYSTEM_SCOPE_ONLY */ static __inline int sigprop(int sig) { if (sig > 0 && sig < NSIG) return (sigproptbl[_SIG_IDX(sig)]); return (0); } typedef void (*ohandler)(int sig, int code, struct sigcontext *scp, char *addr, __sighandler_t *catcher); void _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread_sigframe psf; __siginfohandler_t *sigfunc; struct pthread *curthread; struct kse *curkse; struct sigaction act; int sa_flags, err_save; err_save = errno; DBG_MSG(">>> _thr_sig_handler(%d)\n", sig); curthread = _get_curthread(); if (curthread == NULL) PANIC("No current thread.\n"); if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) PANIC("Thread is not system scope.\n"); if (curthread->flags & THR_FLAGS_EXITING) { errno = err_save; return; } curkse = _get_curkse(); /* * If thread is in critical region or if thread is on * the way of state transition, then latch signal into buffer. */ if (_kse_in_critical() || THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING) { DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig); curthread->siginfo[sig-1] = *info; curthread->check_pending = 1; curkse->k_sigseqno++; SIGADDSET(curthread->sigpend, sig); /* * If the kse is on the way to idle itself, but * we have signal ready, we should prevent it * to sleep, kernel will latch the wakeup request, * so kse_release will return from kernel immediately. 
*/ if (KSE_IS_IDLE(curkse)) kse_wakeup(&curkse->k_kcb->kcb_kmbx); errno = err_save; return; } /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) { + if (_thr_dump_enabled() && (sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } /* Check the threads previous state: */ curthread->critical_count++; if (curthread->sigbackout != NULL) curthread->sigbackout((void *)curthread); curthread->critical_count--; thr_sigframe_save(curthread, &psf); THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); _kse_critical_enter(); /* Get a fresh copy of signal mask */ __sys_sigprocmask(SIG_BLOCK, NULL, &curthread->sigmask); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); sigfunc = _thread_sigact[sig - 1].sa_sigaction; sa_flags = _thread_sigact[sig - 1].sa_flags; if (sa_flags & SA_RESETHAND) { act.sa_handler = SIG_DFL; act.sa_flags = SA_RESTART; SIGEMPTYSET(act.sa_mask); __sys_sigaction(sig, &act, NULL); __sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]); } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* Now invoke real handler */ if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN) && (sigfunc != (__siginfohandler_t *)_thr_sig_handler)) { if ((sa_flags & SA_SIGINFO) != 0 || info == NULL) (*(sigfunc))(sig, info, ucp); else { ((ohandler)(*sigfunc))( sig, info->si_code, (struct sigcontext *)ucp, info->si_addr, (__sighandler_t *)sigfunc); } } else { if ((__sighandler_t *)sigfunc == SIG_DFL) { if (sigprop(sig) & SA_KILL) { if (_kse_isthreaded()) kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); else kill(getpid(), sig); } #ifdef NOTYET else if (sigprop(sig) & SA_STOP) kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig); #endif } } _kse_critical_enter(); curthread->sigmask = ucp->uc_sigmask; SIG_CANTMASK(curthread->sigmask); _kse_critical_leave(&curthread->tcb->tcb_tmbx); thr_sigframe_restore(curthread, &psf); DBG_MSG("<<< 
_thr_sig_handler(%d)\n", sig); errno = err_save; } struct sighandle_info { __siginfohandler_t *sigfunc; int sa_flags; int sig; siginfo_t *info; ucontext_t *ucp; }; static void handle_signal(struct pthread *curthread, struct sighandle_info *shi); static void handle_signal_altstack(struct pthread *curthread, struct sighandle_info *shi); /* Must be called with signal lock and schedule lock held in order */ static void thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info, ucontext_t *ucp) { __siginfohandler_t *sigfunc; sigset_t sigmask; int sa_flags; int onstack; struct sigaction act; struct kse *curkse; struct sighandle_info shi; /* * Invoke the signal handler without going through the scheduler: */ DBG_MSG("Got signal %d, calling handler for current thread %p\n", sig, curthread); if (!_kse_in_critical()) PANIC("thr_sig_invoke_handler without in critical\n"); curkse = curthread->kse; /* * Check that a custom handler is installed and if * the signal is not blocked: */ sigfunc = _thread_sigact[sig - 1].sa_sigaction; sa_flags = _thread_sigact[sig - 1].sa_flags; sigmask = curthread->sigmask; SIGSETOR(curthread->sigmask, _thread_sigact[sig - 1].sa_mask); if (!(sa_flags & (SA_NODEFER | SA_RESETHAND))) SIGADDSET(curthread->sigmask, sig); if ((sig != SIGILL) && (sa_flags & SA_RESETHAND)) { act.sa_handler = SIG_DFL; act.sa_flags = SA_RESTART; SIGEMPTYSET(act.sa_mask); __sys_sigaction(sig, &act, NULL); __sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]); } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * We are processing buffered signals, synchronize working * signal mask into kernel. */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); onstack = _thr_sigonstack(&sigfunc); ucp->uc_stack = curthread->sigstk; ucp->uc_stack.ss_flags = (curthread->sigstk.ss_flags & SS_DISABLE) ? SS_DISABLE : ((onstack) ? 
SS_ONSTACK : 0); if (curthread->oldsigmask) { ucp->uc_sigmask = *(curthread->oldsigmask); curthread->oldsigmask = NULL; } else ucp->uc_sigmask = sigmask; shi.sigfunc = sigfunc; shi.sig = sig; shi.sa_flags = sa_flags; shi.info = info; shi.ucp = ucp; if ((curthread->sigstk.ss_flags & SS_DISABLE) == 0) { /* Deliver signal on alternative stack */ if (sa_flags & SA_ONSTACK && !onstack) handle_signal_altstack(curthread, &shi); else handle_signal(curthread, &shi); } else { handle_signal(curthread, &shi); } _kse_critical_enter(); /* Don't trust after critical leave/enter */ curkse = curthread->kse; /* * Restore the thread's signal mask. */ curthread->sigmask = ucp->uc_sigmask; SIG_CANTMASK(curthread->sigmask); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) __sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL); KSE_SCHED_LOCK(curkse, curkse->k_kseg); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); DBG_MSG("Got signal %d, handler returned %p\n", sig, curthread); } static void handle_signal(struct pthread *curthread, struct sighandle_info *shi) { _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* Check if the signal requires a dump of thread information: */ - if (shi->sig == SIGINFO) { + if (_thr_dump_enabled() && (shi->sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } if (((__sighandler_t *)shi->sigfunc != SIG_DFL) && ((__sighandler_t *)shi->sigfunc != SIG_IGN)) { if ((shi->sa_flags & SA_SIGINFO) != 0 || shi->info == NULL) (*(shi->sigfunc))(shi->sig, shi->info, shi->ucp); else { ((ohandler)(*shi->sigfunc))( shi->sig, shi->info->si_code, (struct sigcontext *)shi->ucp, shi->info->si_addr, (__sighandler_t *)shi->sigfunc); } } else { if ((__sighandler_t *)shi->sigfunc == SIG_DFL) { if (sigprop(shi->sig) & SA_KILL) { if (_kse_isthreaded()) kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, shi->sig); else kill(getpid(), shi->sig); } #ifdef NOTYET else if (sigprop(shi->sig) & SA_STOP) kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, shi->sig); #endif } } } 
static void handle_signal_wrapper(struct pthread *curthread, ucontext_t *ret_uc, struct sighandle_info *shi) { shi->ucp->uc_stack.ss_flags = SS_ONSTACK; handle_signal(curthread, shi); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) setcontext(ret_uc); else { /* Work around for ia64, THR_SETCONTEXT does not work */ _kse_critical_enter(); curthread->tcb->tcb_tmbx.tm_context = *ret_uc; _thread_switch(curthread->kse->k_kcb, curthread->tcb, 1); /* THR_SETCONTEXT */ } } /* * Jump to stack set by sigaltstack before invoking signal handler */ static void handle_signal_altstack(struct pthread *curthread, struct sighandle_info *shi) { volatile int once; ucontext_t uc1, *uc2; THR_ASSERT(_kse_in_critical(), "Not in critical"); once = 0; THR_GETCONTEXT(&uc1); if (once == 0) { once = 1; /* XXX * We are still in critical region, it is safe to operate thread * context */ uc2 = &curthread->tcb->tcb_tmbx.tm_context; uc2->uc_stack = curthread->sigstk; makecontext(uc2, (void (*)(void))handle_signal_wrapper, 3, curthread, &uc1, shi); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) setcontext(uc2); else { _thread_switch(curthread->kse->k_kcb, curthread->tcb, 1); /* THR_SETCONTEXT(uc2); */ } } } int _thr_getprocsig(int sig, siginfo_t *siginfo) { kse_critical_t crit; struct kse *curkse; int ret; DBG_MSG(">>> _thr_getprocsig\n"); crit = _kse_critical_enter(); curkse = _get_curkse(); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); ret = _thr_getprocsig_unlocked(sig, siginfo); KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); _kse_critical_leave(crit); DBG_MSG("<<< _thr_getprocsig\n"); return (ret); } int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo) { sigset_t sigset; struct timespec ts; /* try to retrieve signal from kernel */ SIGEMPTYSET(sigset); SIGADDSET(sigset, sig); ts.tv_sec = 0; ts.tv_nsec = 0; SIGDELSET(_thr_proc_sigpending, sig); if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0) return (sig); return (0); } #ifndef SYSTEM_SCOPE_ONLY /* * Find a thread that can handle the 
signal. This must be called * with upcalls disabled. */ struct pthread * thr_sig_find(struct kse *curkse, int sig, siginfo_t *info) { struct kse_mailbox *kmbx = NULL; struct pthread *pthread; struct pthread *suspended_thread, *signaled_thread; __siginfohandler_t *sigfunc; siginfo_t si; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; signaled_thread = NULL; KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); TAILQ_FOREACH(pthread, &_thread_list, tle) { if (pthread == _thr_sig_daemon) continue; /* Signal delivering to bound thread is done by kernel */ if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) continue; /* Take the scheduling lock. */ KSE_SCHED_LOCK(curkse, pthread->kseg); if ((pthread->state == PS_DEAD) || (pthread->state == PS_DEADLOCK) || THR_IS_EXITING(pthread) || THR_IS_SUSPENDED(pthread)) { ; /* Skip this thread. */ } else if (pthread->state == PS_SIGWAIT && SIGISMEMBER(*(pthread->data.sigwait->waitset), sig)) { /* * retrieve signal from kernel, if it is job control * signal, and sigaction is SIG_DFL, then we will * be stopped in kernel, we hold lock here, but that * does not matter, because that's job control, and * whole process should be stopped. */ if (_thr_getprocsig(sig, &si)) { DBG_MSG("Waking thread %p in sigwait" " with signal %d\n", pthread, sig); /* where to put siginfo ? 
*/ *(pthread->data.sigwait->siginfo) = si; kmbx = _thr_setrunnable_unlocked(pthread); } KSE_SCHED_UNLOCK(curkse, pthread->kseg); /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ KSE_LOCK_RELEASE(curkse, &_thread_list_lock); if (kmbx != NULL) kse_wakeup(kmbx); if (suspended_thread != NULL) _thr_ref_delete(NULL, suspended_thread); if (signaled_thread != NULL) _thr_ref_delete(NULL, signaled_thread); return (NULL); } else if (!SIGISMEMBER(pthread->sigmask, sig)) { /* * If debugger is running, we don't quick exit, * and give it a chance to check the signal. */ if (_libkse_debug == 0) { sigfunc = _thread_sigact[sig - 1].sa_sigaction; if ((__sighandler_t *)sigfunc == SIG_DFL) { if (sigprop(sig) & SA_KILL) { kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); /* Never reach */ } } } if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) { suspended_thread = pthread; suspended_thread->refcount++; } } else if (signaled_thread == NULL) { signaled_thread = pthread; signaled_thread->refcount++; } } KSE_SCHED_UNLOCK(curkse, pthread->kseg); } KSE_LOCK_RELEASE(curkse, &_thread_list_lock); if (suspended_thread != NULL) { pthread = suspended_thread; if (signaled_thread) _thr_ref_delete(NULL, signaled_thread); } else if (signaled_thread) { pthread = signaled_thread; } else { pthread = NULL; } return (pthread); } #endif /* ! SYSTEM_SCOPE_ONLY */ static inline void build_siginfo(siginfo_t *info, int signo) { bzero(info, sizeof(*info)); info->si_signo = signo; info->si_pid = _thr_pid; } /* * This is called by a thread when it has pending signals to deliver. * It should only be called from the context of the thread. 
*/ void _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp) { struct pthread_sigframe psf; siginfo_t siginfo; int i, err_save; kse_critical_t crit; struct kse *curkse; sigset_t sigmask; err_save = errno; DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread); /* Check the threads previous state: */ curthread->critical_count++; if (curthread->sigbackout != NULL) curthread->sigbackout((void *)curthread); curthread->critical_count--; THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); THR_ASSERT((curthread->state == PS_RUNNING), "state is not PS_RUNNING"); thr_sigframe_save(curthread, &psf); /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ crit = _kse_critical_enter(); curkse = curthread->kse; KSE_SCHED_LOCK(curkse, curkse->k_kseg); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); curthread->active_priority &= ~THR_SIGNAL_PRIORITY; SIGFILLSET(sigmask); while (1) { /* * For bound thread, we mask all signals and get a fresh * copy of signal mask from kernel */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { __sys_sigprocmask(SIG_SETMASK, &sigmask, &curthread->sigmask); } for (i = 1; i <= _SIG_MAXSIG; i++) { if (SIGISMEMBER(curthread->sigmask, i)) continue; if (SIGISMEMBER(curthread->sigpend, i)) { SIGDELSET(curthread->sigpend, i); siginfo = curthread->siginfo[i-1]; break; } if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && SIGISMEMBER(_thr_proc_sigpending, i)) { if (_thr_getprocsig_unlocked(i, &siginfo)) break; } } if (i <= _SIG_MAXSIG) thr_sig_invoke_handler(curthread, i, &siginfo, ucp); else { if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); } break; } } /* Don't trust after signal handling */ curkse = curthread->kse; KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* repost masked signal to kernel, it hardly happens in real world */ if 
((curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && !SIGISEMPTY(curthread->sigpend)) { /* dirty read */ __sys_sigprocmask(SIG_SETMASK, &sigmask, &curthread->sigmask); for (i = 1; i <= _SIG_MAXSIG; ++i) { if (SIGISMEMBER(curthread->sigpend, i)) { SIGDELSET(curthread->sigpend, i); if (!_kse_isthreaded()) kill(getpid(), i); else kse_thr_interrupt( &curthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG, i); } } __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); } DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread); thr_sigframe_restore(curthread, &psf); errno = err_save; } /* * This checks pending signals for the current thread. It should be * called whenever a thread changes its signal mask. Note that this * is called from a thread (using its stack). * * XXX - We might want to just check to see if there are pending * signals for the thread here, but enter the UTS scheduler * to actually install the signal handler(s). */ void _thr_sig_check_pending(struct pthread *curthread) { ucontext_t uc; volatile int once; int errsave; /* * If the thread is in critical region, delay processing signals. * If the thread state is not PS_RUNNING, it might be switching * into UTS and but a THR_LOCK_RELEASE saw check_pending, and it * goes here, in the case we delay processing signals, lets UTS * process complicated things, normally UTS will call _thr_sig_add * to resume the thread, so we needn't repeat doing it here. */ if (THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING) return; errsave = errno; once = 0; THR_GETCONTEXT(&uc); if (once == 0) { once = 1; curthread->check_pending = 0; _thr_sig_rundown(curthread, &uc); } errno = errsave; } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. * * This must be called with the thread's scheduling lock held. 
*/ struct kse_mailbox * _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info) { siginfo_t siginfo; struct kse *curkse; struct kse_mailbox *kmbx = NULL; struct pthread *curthread = _get_curthread(); int restart; int suppress_handler = 0; int fromproc = 0; __sighandler_t *sigfunc; DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig); curkse = _get_curkse(); restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; sigfunc = _thread_sigact[sig - 1].sa_handler; fromproc = (curthread == _thr_sig_daemon); if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK || pthread->state == PS_STATE_MAX) return (NULL); /* return false */ if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && (curthread != pthread)) { PANIC("Please use _thr_send_sig for bound thread"); return (NULL); } if (pthread->state != PS_SIGWAIT && SIGISMEMBER(pthread->sigmask, sig)) { /* signal is masked, just add signal to thread. */ if (!fromproc) { SIGADDSET(pthread->sigpend, sig); if (info == NULL) build_siginfo(&pthread->siginfo[sig-1], sig); else if (info != &pthread->siginfo[sig-1]) memcpy(&pthread->siginfo[sig-1], info, sizeof(*info)); } else { if (!_thr_getprocsig(sig, &pthread->siginfo[sig-1])) return (NULL); SIGADDSET(pthread->sigpend, sig); } } else { /* if process signal not exists, just return */ if (fromproc) { if (!_thr_getprocsig(sig, &siginfo)) return (NULL); info = &siginfo; } if (pthread->state != PS_SIGWAIT && sigfunc == SIG_DFL && (sigprop(sig) & SA_KILL)) { kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); /* Never reach */ } /* * Process according to thread state: */ switch (pthread->state) { case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: return (NULL); /* XXX return false */ case PS_LOCKWAIT: case PS_SUSPENDED: /* * You can't call a signal handler for threads in these * states. 
*/ suppress_handler = 1; break; case PS_RUNNING: if ((pthread->flags & THR_FLAGS_IN_RUNQ)) { THR_RUNQ_REMOVE(pthread); pthread->active_priority |= THR_SIGNAL_PRIORITY; THR_RUNQ_INSERT_TAIL(pthread); } else { /* Possible not in RUNQ and has curframe ? */ pthread->active_priority |= THR_SIGNAL_PRIORITY; } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_COND_WAIT: case PS_MUTEX_WAIT: break; case PS_SLEEP_WAIT: /* * Unmasked signals always cause sleep to terminate * early regardless of SA_RESTART: */ pthread->interrupted = 1; break; case PS_JOIN: break; case PS_SIGSUSPEND: pthread->interrupted = 1; break; case PS_SIGWAIT: if (info == NULL) build_siginfo(&pthread->siginfo[sig-1], sig); else if (info != &pthread->siginfo[sig-1]) memcpy(&pthread->siginfo[sig-1], info, sizeof(*info)); /* * The signal handler is not called for threads in * SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is not blocked. */ if (SIGISMEMBER(*(pthread->data.sigwait->waitset), sig)) { /* Return the signal number: */ *(pthread->data.sigwait->siginfo) = pthread->siginfo[sig-1]; /* Make the thread runnable: */ kmbx = _thr_setrunnable_unlocked(pthread); } else { /* Increment the pending signal count. */ SIGADDSET(pthread->sigpend, sig); if (!SIGISMEMBER(pthread->sigmask, sig)) { if (sigfunc == SIG_DFL && sigprop(sig) & SA_KILL) { kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); /* Never reach */ } pthread->check_pending = 1; pthread->interrupted = 1; kmbx = _thr_setrunnable_unlocked(pthread); } } return (kmbx); } SIGADDSET(pthread->sigpend, sig); if (info == NULL) build_siginfo(&pthread->siginfo[sig-1], sig); else if (info != &pthread->siginfo[sig-1]) memcpy(&pthread->siginfo[sig-1], info, sizeof(*info)); pthread->check_pending = 1; if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && (pthread->blocked != 0) && !THR_IN_CRITICAL(pthread)) kse_thr_interrupt(&pthread->tcb->tcb_tmbx, restart ? 
KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0); if (suppress_handler == 0) { /* * Setup a signal frame and save the current threads * state: */ if (pthread->state != PS_RUNNING) { if (pthread->flags & THR_FLAGS_IN_RUNQ) THR_RUNQ_REMOVE(pthread); pthread->active_priority |= THR_SIGNAL_PRIORITY; kmbx = _thr_setrunnable_unlocked(pthread); } } } return (kmbx); } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thr_sig_send(struct pthread *pthread, int sig) { struct pthread *curthread = _get_curthread(); struct kse_mailbox *kmbx; if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG, sig); return; } /* Lock the scheduling queue of the target thread. */ THR_SCHED_LOCK(curthread, pthread); if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { kmbx = _thr_sig_add(pthread, sig, NULL); /* Add a preemption point. */ if (kmbx == NULL && (curthread->kseg == pthread->kseg) && (pthread->active_priority > curthread->active_priority)) curthread->critical_yield = 1; THR_SCHED_UNLOCK(curthread, pthread); if (kmbx != NULL) kse_wakeup(kmbx); /* XXX * If thread sent signal to itself, check signals now. * It is not really needed, _kse_critical_leave should * have already checked signals. 
*/ if (pthread == curthread && curthread->check_pending) _thr_sig_check_pending(curthread); } else { THR_SCHED_UNLOCK(curthread, pthread); } } static inline void thr_sigframe_restore(struct pthread *curthread, struct pthread_sigframe *psf) { kse_critical_t crit; struct kse *curkse; THR_THREAD_LOCK(curthread, curthread); curthread->cancelflags = psf->psf_cancelflags; crit = _kse_critical_enter(); curkse = curthread->kse; KSE_SCHED_LOCK(curkse, curthread->kseg); curthread->flags = psf->psf_flags; curthread->interrupted = psf->psf_interrupted; curthread->timeout = psf->psf_timeout; curthread->data = psf->psf_wait_data; curthread->wakeup_time = psf->psf_wakeup_time; curthread->continuation = psf->psf_continuation; KSE_SCHED_UNLOCK(curkse, curthread->kseg); _kse_critical_leave(crit); THR_THREAD_UNLOCK(curthread, curthread); } static inline void thr_sigframe_save(struct pthread *curthread, struct pthread_sigframe *psf) { kse_critical_t crit; struct kse *curkse; THR_THREAD_LOCK(curthread, curthread); psf->psf_cancelflags = curthread->cancelflags; crit = _kse_critical_enter(); curkse = curthread->kse; KSE_SCHED_LOCK(curkse, curthread->kseg); /* This has to initialize all members of the sigframe. 
*/ psf->psf_flags = (curthread->flags & (THR_FLAGS_PRIVATE | THR_FLAGS_EXITING)); psf->psf_interrupted = curthread->interrupted; psf->psf_timeout = curthread->timeout; psf->psf_wait_data = curthread->data; psf->psf_wakeup_time = curthread->wakeup_time; psf->psf_continuation = curthread->continuation; KSE_SCHED_UNLOCK(curkse, curthread->kseg); _kse_critical_leave(crit); THR_THREAD_UNLOCK(curthread, curthread); } void _thr_signal_init(void) { struct sigaction act; __siginfohandler_t *sigfunc; int i; sigset_t sigset; SIGFILLSET(sigset); __sys_sigprocmask(SIG_SETMASK, &sigset, &_thr_initial->sigmask); /* Enter a loop to get the existing signal status: */ for (i = 1; i <= _SIG_MAXSIG; i++) { /* Get the signal handler details: */ if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Intall wrapper if handler was set */ sigfunc = _thread_sigact[i - 1].sa_sigaction; if (((__sighandler_t *)sigfunc) != SIG_DFL && ((__sighandler_t *)sigfunc) != SIG_IGN) { act = _thread_sigact[i - 1]; act.sa_flags |= SA_SIGINFO; act.sa_sigaction = (__siginfohandler_t *)_thr_sig_handler; __sys_sigaction(i, &act, NULL); } } - /* - * Install the signal handler for SIGINFO. It isn't - * really needed, but it is nice to have for debugging - * purposes. - */ - _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART; - SIGEMPTYSET(act.sa_mask); - act.sa_flags = SA_SIGINFO | SA_RESTART; - act.sa_sigaction = (__siginfohandler_t *)&_thr_sig_handler; - if (__sys_sigaction(SIGINFO, &act, NULL) != 0) { - __sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL); + if (_thr_dump_enabled()) { /* - * Abort this process if signal initialisation fails: + * Install the signal handler for SIGINFO. It isn't + * really needed, but it is nice to have for debugging + * purposes. 
*/ - PANIC("Cannot initialize signal handler"); + _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART; + SIGEMPTYSET(act.sa_mask); + act.sa_flags = SA_SIGINFO | SA_RESTART; + act.sa_sigaction = (__siginfohandler_t *)&_thr_sig_handler; + if (__sys_sigaction(SIGINFO, &act, NULL) != 0) { + __sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, + NULL); + /* + * Abort this process if signal initialisation fails: + */ + PANIC("Cannot initialize signal handler"); + } } __sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL); __sys_sigaltstack(NULL, &_thr_initial->sigstk); } void _thr_signal_deinit(void) { int i; struct pthread *curthread = _get_curthread(); /* Clear process pending signals. */ sigemptyset(&_thr_proc_sigpending); /* Enter a loop to get the existing signal status: */ for (i = 1; i <= _SIG_MAXSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Set the signal handler details: */ else if (__sys_sigaction(i, &_thread_sigact[i - 1], NULL) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot set signal handler info"); } } __sys_sigaltstack(&curthread->sigstk, NULL); } diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c index 7945ed921961..ac043367f874 100644 --- a/lib/libpthread/thread/thr_init.c +++ b/lib/libpthread/thread/thr_init.c @@ -1,526 +1,528 @@ /* * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); void _thread_init_hack(void); extern int _thread_state_running; static void init_private(void); static void init_main_thread(struct pthread *thread); /* * All weak references used within libc should be in this table. * This is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_thread_init_hack, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; #define DUAL_ENTRY(entry) \ (pthread_func_t)entry, (pthread_func_t)entry static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(_pthread_atfork)}, /* PJT_ATFORK */ {DUAL_ENTRY(_pthread_attr_destroy)}, /* PJT_ATTR_DESTROY */ {DUAL_ENTRY(_pthread_attr_getdetachstate)}, /* PJT_ATTR_GETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_getguardsize)}, /* PJT_ATTR_GETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_getinheritsched)}, /* PJT_ATTR_GETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_getschedparam)}, /* PJT_ATTR_GETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_getschedpolicy)}, /* PJT_ATTR_GETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_getscope)}, /* PJT_ATTR_GETSCOPE */ {DUAL_ENTRY(_pthread_attr_getstackaddr)}, /* PJT_ATTR_GETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_getstacksize)}, /* PJT_ATTR_GETSTACKSIZE */ {DUAL_ENTRY(_pthread_attr_init)}, /* PJT_ATTR_INIT */ {DUAL_ENTRY(_pthread_attr_setdetachstate)}, /* PJT_ATTR_SETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_setguardsize)}, /* PJT_ATTR_SETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_setinheritsched)}, /* PJT_ATTR_SETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_setschedparam)}, /* PJT_ATTR_SETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_setschedpolicy)}, /* PJT_ATTR_SETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_setscope)}, /* PJT_ATTR_SETSCOPE */ {DUAL_ENTRY(_pthread_attr_setstackaddr)}, /* PJT_ATTR_SETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_setstacksize)}, /* PJT_ATTR_SETSTACKSIZE */ {DUAL_ENTRY(_pthread_cancel)}, /* PJT_CANCEL */ {DUAL_ENTRY(_pthread_cleanup_pop)}, /* PJT_CLEANUP_POP */ {DUAL_ENTRY(_pthread_cleanup_push)}, /* PJT_CLEANUP_PUSH */ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */ 
{DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */ {DUAL_ENTRY(_pthread_cond_timedwait)}, /* PJT_COND_TIMEDWAIT */ {(pthread_func_t)__pthread_cond_wait, (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */ {DUAL_ENTRY(_pthread_detach)}, /* PJT_DETACH */ {DUAL_ENTRY(_pthread_equal)}, /* PJT_EQUAL */ {DUAL_ENTRY(_pthread_exit)}, /* PJT_EXIT */ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */ {DUAL_ENTRY(_pthread_join)}, /* PJT_JOIN */ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/ {DUAL_ENTRY(_pthread_kill)}, /* PJT_KILL */ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ {(pthread_func_t)__pthread_mutex_lock, (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ {(pthread_func_t)__pthread_mutex_trylock, (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */ {DUAL_ENTRY(_pthread_setcancelstate)}, /* PJT_SETCANCELSTATE */ {DUAL_ENTRY(_pthread_setcanceltype)}, /* PJT_SETCANCELTYPE */ {DUAL_ENTRY(_pthread_setspecific)}, /* 
PJT_SETSPECIFIC */ {DUAL_ENTRY(_pthread_sigmask)}, /* PJT_SIGMASK */ {DUAL_ENTRY(_pthread_testcancel)} /* PJT_TESTCANCEL */ }; static int init_once = 0; /* * Threaded process initialization. * * This is only called under two conditions: * * 1) Some thread routines have detected that the library hasn't yet * been initialized (_thr_initial == NULL && curthread == NULL), or * * 2) An explicit call to reinitialize after a fork (indicated * by curthread != NULL) */ void _libpthread_init(struct pthread *curthread) { int fd; /* Check if this function has already been called: */ if ((_thr_initial != NULL) && (curthread == NULL)) /* Only initialize the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* Pull debug symbols in for static binary */ _thread_state_running = PS_RUNNING; /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. */ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if ((_thr_pid = getpid()) == 1) { /* * Setup a new session for this process which is * assumed to be running as root. */ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); } /* Initialize pthread private data. */ init_private(); _kse_init(); /* Initialize the initial kse and kseg. 
*/ _kse_initial = _kse_alloc(NULL, _thread_scope_system > 0); if (_kse_initial == NULL) PANIC("Can't allocate initial kse."); _kse_initial->k_kseg = _kseg_alloc(NULL); if (_kse_initial->k_kseg == NULL) PANIC("Can't allocate initial kseg."); _kse_initial->k_kseg->kg_flags |= KGF_SINGLE_THREAD; _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq; TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe); _kse_initial->k_kseg->kg_ksecount = 1; /* Set the initial thread. */ if (curthread == NULL) { /* Create and initialize the initial thread. */ curthread = _thr_alloc(NULL); if (curthread == NULL) PANIC("Can't allocate initial thread"); _thr_initial = curthread; init_main_thread(curthread); } else { /* * The initial thread is the current thread. It is * assumed that the current thread is already initialized * because it is left over from a fork(). */ _thr_initial = curthread; } _kse_initial->k_kseg->kg_threadcount = 0; _thr_initial->kse = _kse_initial; _thr_initial->kseg = _kse_initial->k_kseg; _thr_initial->active = 1; /* * Add the thread to the thread list and to the KSEG's thread * queue. */ THR_LIST_ADD(_thr_initial); KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ _thr_initial->kse->k_curthread = _thr_initial; _kcb_set(_thr_initial->kse->k_kcb); _tcb_set(_thr_initial->kse->k_kcb, _thr_initial->tcb); _thr_initial->kse->k_flags |= KF_INITIALIZED; _thr_signal_init(); _kse_critical_leave(&_thr_initial->tcb->tcb_tmbx); /* * activate threaded mode as soon as possible if we are * being debugged */ if (_libkse_debug) _kse_setthreaded(1); } /* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; thread->attr.flags |= PTHREAD_SCOPE_SYSTEM; /* * Set up the thread stack. 
* * Create a red zone below the main stack. All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap((void *)_usrstack - _thr_stack_initial - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. */ thread->attr.stackaddr_attr = (void *)_usrstack - _thr_stack_initial; thread->attr.stacksize_attr = _thr_stack_initial; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->slice_usec = -1; thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; thread->name = strdup("initial thread"); /* Initialize the thread for signals: */ SIGEMPTYSET(thread->sigmask); /* * Set up the thread mailbox. The threads saved context * is also in the mailbox. 
*/ thread->tcb->tcb_tmbx.tm_udata = thread; thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; thread->active_priority = THR_DEFAULT_PRIORITY; thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); /* Initialize hooks in the thread structure: */ thread->specific = NULL; thread->cleanup = NULL; thread->flags = 0; thread->sigbackout = NULL; thread->continuation = NULL; thread->state = PS_RUNNING; thread->uniqueid = 0; } static void init_private(void) { struct clockinfo clockinfo; size_t len; int mib[2]; /* * Avoid reinitializing some things if they don't need to be, * e.g. after a fork(). */ if (init_once == 0) { /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) PANIC("Cannot get kern.usrstack from sysctl"); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = 1000000 / clockinfo.stathz; else _clock_res_usec = CLOCK_RES_USEC; _thr_page_size = getpagesize(); _thr_guard_default = _thr_page_size; if (sizeof(void *) == 8) { _thr_stack_default = THR_STACK64_DEFAULT; _thr_stack_initial = THR_STACK64_INITIAL; } else { _thr_stack_default = THR_STACK32_DEFAULT; _thr_stack_initial = THR_STACK32_INITIAL; } _pthread_attr_default.guardsize_attr = _thr_guard_default; _pthread_attr_default.stacksize_attr = _thr_stack_default; TAILQ_INIT(&_thr_atfork_list); init_once = 1; /* Don't do this again. */ } else { /* * Destroy the locks before creating them. We don't * know what state they are in so it is better to just * recreate them. 
*/ _lock_destroy(&_thread_signal_lock); _lock_destroy(&_mutex_static_lock); _lock_destroy(&_rwlock_static_lock); _lock_destroy(&_keytable_lock); } /* Initialize everything else. */ TAILQ_INIT(&_thread_list); TAILQ_INIT(&_thread_gc_list); _pthread_mutex_init(&_thr_atfork_mutex, NULL); /* * Initialize the lock for temporary installation of signal * handlers (to support sigwait() semantics) and for the * process signal mask and pending signal sets. */ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Cannot initialize _thread_signal_lock"); if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize mutex static init lock"); if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize rwlock static init lock"); if (_lock_init(&_keytable_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread specific keytable lock"); _thr_spinlock_init(); /* Clear pending signals and get the process signal mask. */ SIGEMPTYSET(_thr_proc_sigpending); /* Are we in M:N mode (default) or 1:1 mode? */ #ifdef SYSTEM_SCOPE_ONLY _thread_scope_system = 1; #else if (getenv("LIBPTHREAD_SYSTEM_SCOPE") != NULL) _thread_scope_system = 1; else if (getenv("LIBPTHREAD_PROCESS_SCOPE") != NULL) _thread_scope_system = -1; #endif + if (getenv("LIBPTHREAD_DEBUG") != NULL) + _thr_debug_flags |= DBG_INFO_DUMP; /* * _thread_list_lock and _kse_count are initialized * by _kse_init() */ } diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h index e2055bc1da64..9d8ee632fd00 100644 --- a/lib/libpthread/thread/thr_private.h +++ b/lib/libpthread/thread/thr_private.h @@ -1,1281 +1,1282 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef LIBTHREAD_DB #include "lock.h" #include "pthread_md.h" #endif /* * Evaluate the storage class specifier. 
*/ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #define SCLASS_PRESET(x...) = x #else #define SCLASS extern #define SCLASS_PRESET(x...) #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thr_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define DBG_MUTEX 0x0001 #define DBG_SIG 0x0002 +#define DBG_INFO_DUMP 0x0004 #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif /* * State change macro without scheduling queue change: */ #define THR_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; int pq_threads; } pq_queue_t; /* * Each KSEG has a scheduling queue. For now, threads that exist in their * own KSEG (system scope) will get a full priority queue. In the future * this can be optimized for the single thread per KSEG case. */ struct sched_queue { pq_queue_t sq_runq; TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ }; typedef struct kse_thr_mailbox *kse_critical_t; struct kse_group; #define MAX_KSE_LOCKLEVEL 5 struct kse { /* -- location and order specific items for gdb -- */ struct kcb *k_kcb; struct pthread *k_curthread; /* current thread */ struct kse_group *k_kseg; /* parent KSEG */ struct sched_queue *k_schedq; /* scheduling queue */ /* -- end of location and order specific items -- */ TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ /* * Items that are only modified by the kse, or that otherwise * don't need to be locked when accessed */ struct lock k_lock; struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; int k_locklevel; stack_t k_stack; int k_flags; #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ #define KF_TERMINATED 0x0004 /* kse is terminated */ #define KF_IDLE 0x0008 /* kse is idle */ #define KF_SWITCH 0x0010 /* thread switch in UTS */ int k_error; /* syscall errno in critical */ int k_cpu; /* CPU ID when bound */ int k_sigseqno; /* signal buffered count */ }; #define KSE_SET_IDLE(kse) ((kse)->k_flags |= KF_IDLE) #define 
KSE_CLEAR_IDLE(kse) ((kse)->k_flags &= ~KF_IDLE) #define KSE_IS_IDLE(kse) (((kse)->k_flags & KF_IDLE) != 0) #define KSE_SET_SWITCH(kse) ((kse)->k_flags |= KF_SWITCH) #define KSE_CLEAR_SWITCH(kse) ((kse)->k_flags &= ~KF_SWITCH) #define KSE_IS_SWITCH(kse) (((kse)->k_flags & KF_SWITCH) != 0) /* * Each KSE group contains one or more KSEs in which threads can run. * At least for now, there is one scheduling queue per KSE group; KSEs * within the same KSE group compete for threads from the same scheduling * queue. A scope system thread has one KSE in one KSE group; the group * does not use its scheduling queue. */ struct kse_group { TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ struct sched_queue kg_schedq; /* scheduling queue */ struct lock kg_lock; int kg_threadcount; /* # of assigned threads */ int kg_ksecount; /* # of assigned KSEs */ int kg_idle_kses; int kg_flags; #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ }; /* * Add/remove threads from a KSE's scheduling queue. * For now the scheduling queue is hung off the KSEG. */ #define KSEG_THRQ_ADD(kseg, thr) \ do { \ TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ (kseg)->kg_threadcount++; \ } while (0) #define KSEG_THRQ_REMOVE(kseg, thr) \ do { \ TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ (kseg)->kg_threadcount--; \ } while (0) /* * Lock acquire and release for KSEs. 
*/ #define KSE_LOCK_ACQUIRE(kse, lck) \ do { \ if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) { \ (kse)->k_locklevel++; \ _lock_acquire((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ } \ else \ PANIC("Exceeded maximum lock level"); \ } while (0) #define KSE_LOCK_RELEASE(kse, lck) \ do { \ if ((kse)->k_locklevel > 0) { \ _lock_release((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ (kse)->k_locklevel--; \ } \ } while (0) /* * Lock our own KSEG. */ #define KSE_LOCK(curkse) \ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) #define KSE_UNLOCK(curkse) \ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) /* * Lock a potentially different KSEG. */ #define KSE_SCHED_LOCK(curkse, kseg) \ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) #define KSE_SCHED_UNLOCK(curkse, kseg) \ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) /* * Waiting queue manipulation macros (using pqe link): */ #define KSE_WAITQ_REMOVE(kse, thrd) \ do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ } \ } while (0) #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx) /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * lock initialization values. */ #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } struct pthread_mutex { /* * Lock for accesses to this structure. */ struct lock m_lock; enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; long m_flags; int m_count; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. 
* m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { /* * Lock for accesses to this structure. */ struct lock c_lock; enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; struct pthread_mutex *c_mutex; long c_flags; long c_seqno; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; struct pthread_barrier { pthread_mutex_t b_lock; pthread_cond_t b_cond; int b_count; int b_waiters; int b_generation; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { volatile int s_lock; pthread_t s_owner; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ NULL, NULL, 0, 0 } /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; int onstack; }; #define THR_CLEANUP_PUSH(td, func, arg) { \ struct pthread_cleanup __cup; \ \ __cup.routine = func; \ __cup.routine_arg = arg; \ __cup.onstack = 1; \ __cup.next = (td)->cleanup; \ (td)->cleanup = &__cup; #define THR_CLEANUP_POP(td, exec) \ (td)->cleanup = __cup.next; \ if ((exec) != 0) \ __cup.routine(__cup.routine_arg); \ } struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); void (*parent)(void); void (*child)(void); }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ #define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */ int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK32_DEFAULT (1 * 1024 * 1024) #define THR_STACK64_DEFAULT (2 * 1024 * 1024) /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK32_INITIAL (2 * 1024 * 1024) #define THR_STACK64_INITIAL (4 * 1024 * 1024) /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. 
* The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define THR_DEFAULT_PRIORITY 15 #define THR_MIN_PRIORITY 0 #define THR_MAX_PRIORITY 31 /* 0x1F */ #define THR_SIGNAL_PRIORITY 32 /* 0x20 */ #define THR_RT_PRIORITY 64 /* 0x40 */ #define THR_FIRST_PRIORITY THR_MIN_PRIORITY #define THR_LAST_PRIORITY \ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * XXX - Define a thread-safe macro to get the current time of day * which is updated at regular intervals by something. * * For now, we just make the system call to get the time. */ #define KSE_GET_TOD(curkse, tsp) \ do { \ *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \ if ((tsp)->tv_sec == 0) \ clock_gettime(CLOCK_REALTIME, tsp); \ } while (0) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ pthread_cond_t read_signal; pthread_cond_t write_signal; int state; /* 0 = idle >0 = # of readers -1 = writer */ int blocked_writers; }; /* * Thread states. 
*/ enum pthread_state { PS_RUNNING, PS_LOCKWAIT, PS_MUTEX_WAIT, PS_COND_WAIT, PS_SLEEP_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; struct sigwait_data { sigset_t *waitset; siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */ }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; struct lock *lock; struct sigwait_data *sigwait; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); /* * This stores a thread's state prior to running a signal handler. * It is used when a signal is delivered to a thread blocked in * userland. If the signal handler returns normally, the thread's * state is restored from here. */ struct pthread_sigframe { int psf_valid; int psf_flags; int psf_cancelflags; int psf_interrupted; int psf_timeout; int psf_signo; enum pthread_state psf_state; union pthread_wait_data psf_wait_data; struct timespec psf_wakeup_time; sigset_t psf_sigset; sigset_t psf_sigmask; int psf_seqno; thread_continuation_t psf_continuation; }; struct join_status { struct pthread *thread; void *ret; int error; }; struct pthread_specific_elem { const void *data; int seqno; }; struct pthread_key { volatile int allocated; volatile int count; int seqno; void (*destructor) (void *); }; #define MAX_THR_LOCKLEVEL 5 /* * Thread structure. */ struct pthread { /* Thread control block */ struct tcb *tcb; /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ /* Queue entry for GC lists: */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry */ LIST_ENTRY(pthread) hle; /* * Lock for accesses to this thread structure. 
*/ struct lock lock; struct lockuser lockusers[MAX_THR_LOCKLEVEL]; int locklevel; kse_critical_t critical[MAX_KSE_LOCKLEVEL]; struct kse *kse; struct kse_group *kseg; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; int active; /* thread running */ int blocked; /* thread blocked in kernel */ int need_switchout; /* * Used for tracking delivery of signal handlers. */ siginfo_t *siginfo; thread_continuation_t sigbackout; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define THR_AT_CANCEL_POINT 0x0004 #define THR_CANCELLING 0x0008 #define THR_CANCEL_NEEDED 0x0010 int cancelflags; thread_continuation_t continuation; /* * The thread's base and pending signal masks. The active * signal mask is stored in the thread's context (in mailbox). */ sigset_t sigmask; sigset_t sigpend; sigset_t *oldsigmask; volatile int check_pending; int refcount; /* Thread state: */ enum pthread_state state; volatile int lock_switch; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). 
It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex or condition variable queue when it is not * being called from a signal handler. If a thread is a member * of one of these queues when a signal handler is invoked, it * must be removed from the queue before invoking the handler * and then added back to the queue after return from the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ /* Wait data. */ union pthread_wait_data data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* * Set to TRUE if this thread should yield after leaving a * critical region to check for signals, messages, etc. */ int critical_yield; int sflags; #define THR_FLAGS_IN_SYNCQ 0x0001 /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ /* Thread list flags; only set with thread list lock held. 
*/ #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ int tlflags; /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* Number rwlocks rdlocks held. */ int rdlock_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Alternative stack for sigaltstack() */ stack_t sigstk; /* * Current locks bitmap for rtld. */ int rtld_bits; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Critical regions can also be detected by looking at the threads * current lock level. Ensure these macros increment and decrement * the lock levels such that locks can not be held with a lock level * of 0. 
*/ #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_YIELD_CHECK(thrd) \ do { \ if (!THR_IN_CRITICAL(thrd)) { \ if (__predict_false(_libkse_debug)) \ _thr_debug_check_yield(thrd); \ if ((thrd)->critical_yield != 0) \ _thr_sched_switch(thrd); \ if ((thrd)->check_pending != 0) \ _thr_sig_check_pending(thrd); \ } \ } while (0) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \ THR_DEACTIVATE_LAST_LOCK(thrd); \ (thrd)->locklevel++; \ _lock_acquire((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1], \ (thrd)->active_priority); \ } else \ PANIC("Exceeded maximum lock level"); \ } while (0) #define THR_LOCK_RELEASE(thrd, lck) \ do { \ if ((thrd)->locklevel > 0) { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ THR_ACTIVATE_LAST_LOCK(thrd); \ if ((thrd)->locklevel == 0) \ THR_YIELD_CHECK(thrd); \ } \ } while (0) #define THR_ACTIVATE_LAST_LOCK(thrd) \ do { \ if ((thrd)->locklevel > 0) \ _lockuser_setactive( \ &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \ } while (0) #define THR_DEACTIVATE_LAST_LOCK(thrd) \ do { \ if ((thrd)->locklevel > 0) \ _lockuser_setactive( \ &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \ } while (0) /* * For now, threads will have their own lock separate from their * KSE scheduling lock. */ #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) /* * Priority queue manipulation macros (using pqe link). We use * the thread's kseg link instead of the kse link because a thread * does not (currently) have a statically assigned kse. 
*/ #define THR_RUNQ_INSERT_HEAD(thrd) \ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_INSERT_TAIL(thrd) \ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_REMOVE(thrd) \ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) /* * Locking the scheduling queue for another thread uses that thread's * KSEG lock. */ #define THR_SCHED_LOCK(curthr, thr) do { \ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ (curthr)->locklevel++; \ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ } while (0) #define THR_SCHED_UNLOCK(curthr, thr) do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ } while (0) /* Take the scheduling lock with the intent to call the scheduler. 
*/ #define THR_LOCK_SWITCH(curthr) do { \ (void)_kse_critical_enter(); \ KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \ } while (0) #define THR_UNLOCK_SWITCH(curthr) do { \ KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ #define THR_CRITICAL_LEAVE(thr) do { \ (thr)->critical_count--; \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ _thr_sched_switch(thr); \ } \ } while (0) #define THR_IS_ACTIVE(thrd) \ ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)) #define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0) #define THR_IS_SUSPENDED(thrd) \ (((thrd)->state == PS_SUSPENDED) || \ (((thrd)->flags & THR_FLAGS_SUSPENDED) != 0)) #define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0) #define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \ TMDF_SUSPEND) == 0) extern int __isthreaded; static inline int _kse_isthreaded(void) { return (__isthreaded != 0); } /* * Global variables for the pthread kernel. 
*/ SCLASS void *_usrstack SCLASS_PRESET(NULL); SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); /* For debugger */ SCLASS int _libkse_debug SCLASS_PRESET(0); SCLASS int _thread_activated SCLASS_PRESET(0); SCLASS int _thread_scope_system SCLASS_PRESET(0); /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); /* List of threads needing GC: */ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); SCLASS int _thread_active_threads SCLASS_PRESET(1); SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list; SCLASS pthread_mutex_t _thr_atfork_mutex; /* Default thread attributes: */ SCLASS struct pthread_attr _pthread_attr_default SCLASS_PRESET({ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, /* stacksize */0, /* guardsize */0 }); /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr _pthread_mutexattr_default SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr _pthread_condattr_default SCLASS_PRESET({COND_TYPE_FAST, 0}); /* Clock resolution in usec. */ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); /* Array of signal actions for this process: */ SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG]; /* * Lock for above count of dummy handlers and for the process signal * mask and pending signal sets. */ SCLASS struct lock _thread_signal_lock; /* Pending signals and mask for this process: */ SCLASS sigset_t _thr_proc_sigpending; SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG]; SCLASS pid_t _thr_pid SCLASS_PRESET(0); /* Garbage collector lock. 
*/ SCLASS struct lock _gc_lock; SCLASS int _gc_check SCLASS_PRESET(0); SCLASS int _gc_count SCLASS_PRESET(0); SCLASS struct lock _mutex_static_lock; SCLASS struct lock _rwlock_static_lock; SCLASS struct lock _keytable_lock; SCLASS struct lock _thread_list_lock; SCLASS int _thr_guard_default; SCLASS int _thr_stack_default; SCLASS int _thr_stack_initial; SCLASS int _thr_page_size; SCLASS pthread_t _thr_sig_daemon; SCLASS int _thr_debug_flags SCLASS_PRESET(0); /* Undefine the storage class and preset specifiers: */ #undef SCLASS #undef SCLASS_PRESET /* * Function prototype definitions. */ __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); struct kse *_kse_alloc(struct pthread *, int sys_scope); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); int _kse_in_critical(void); void _kse_free(struct pthread *, struct kse *); void _kse_init(); struct kse_group *_kseg_alloc(struct pthread *); void _kse_lock_wait(struct lock *, struct lockuser *lu); void _kse_lock_wakeup(struct lock *, struct lockuser *lu); void _kse_single_thread(struct pthread *); int _kse_setthreaded(int); void _kseg_free(struct kse_group *); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_notify_priochange(struct pthread *, struct pthread *, int); int _mutex_reinit(struct pthread_mutex *); void _mutex_unlock_private(struct pthread *); void _libpthread_init(struct pthread *); int _pq_alloc(struct pq_queue *, int, int); void _pq_free(struct pq_queue *); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); struct pthread *_pq_first_debug(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int 
_pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *); int _pthread_rwlock_destroy (pthread_rwlock_t *); struct pthread *_pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _pthread_yield(void); void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg); void _pthread_cleanup_pop(int execute); struct pthread *_thr_alloc(struct pthread *); void _thr_exit(char *, int, char *); void _thr_exit_cleanup(void); void _thr_lock_wait(struct lock *lock, struct lockuser *lu); void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); void _thr_mutex_reinit(pthread_mutex_t *); int _thr_ref_add(struct pthread *, struct pthread *, int); void _thr_ref_delete(struct pthread *, struct pthread *); void _thr_rtld_init(void); void _thr_rtld_fini(void); int _thr_schedule_add(struct pthread *, struct pthread *); void _thr_schedule_remove(struct pthread *, struct pthread *); void _thr_setrunnable(struct pthread *curthread, struct pthread *thread); struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread); struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *); void _thr_sig_dispatch(struct kse *, int, siginfo_t *); int _thr_stack_alloc(struct pthread_attr *); void _thr_stack_free(struct pthread_attr *); void _thr_exit_cleanup(void); void _thr_free(struct pthread *, struct pthread *); void _thr_gc(struct pthread *); void _thr_panic_exit(char *, int, char *); void _thread_cleanupspecific(void); void _thread_dump_info(void); void 
_thread_printf(int, const char *, ...); void _thr_sched_switch(struct pthread *); void _thr_sched_switch_unlocked(struct pthread *); void _thr_set_timeout(const struct timespec *); void _thr_seterrno(struct pthread *, int); void _thr_sig_handler(int, siginfo_t *, ucontext_t *); void _thr_sig_check_pending(struct pthread *); void _thr_sig_rundown(struct pthread *, ucontext_t *); void _thr_sig_send(struct pthread *pthread, int sig); void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); void _thr_spinlock_init(void); void _thr_cancel_enter(struct pthread *); void _thr_cancel_leave(struct pthread *, int); int _thr_setconcurrency(int new_level); int _thr_setmaxconcurrency(void); void _thr_critical_enter(struct pthread *); void _thr_critical_leave(struct pthread *); int _thr_start_sig_daemon(void); int _thr_getprocsig(int sig, siginfo_t *siginfo); int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo); void _thr_signal_init(void); void _thr_signal_deinit(void); void _thr_hash_add(struct pthread *); void _thr_hash_remove(struct pthread *); struct pthread *_thr_hash_find(struct pthread *); void _thr_finish_cancellation(void *arg); int _thr_sigonstack(void *sp); void _thr_debug_check_yield(struct pthread *); /* * Aliases for _pthread functions. Should be called instead of * originals if PLT relocation is unwanted at runtime.
*/ int _thr_cond_broadcast(pthread_cond_t *); int _thr_cond_signal(pthread_cond_t *); int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *); int _thr_mutex_lock(pthread_mutex_t *); int _thr_mutex_unlock(pthread_mutex_t *); int _thr_rwlock_rdlock (pthread_rwlock_t *); int _thr_rwlock_wrlock (pthread_rwlock_t *); int _thr_rwlock_unlock (pthread_rwlock_t *); /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _SCHED_H_ int __sys_sched_yield(void); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, socklen_t *); int __sys_connect(int, const struct sockaddr *, socklen_t); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fork(void); int __sys_fsync(int); pid_t __sys_getpid(void); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); int
__sys_sigwait(const sigset_t *, int *); int __sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif #endif /* !_THR_PRIVATE_H */ diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c index ec6ebd51eadd..f53b87f08b17 100644 --- a/lib/libpthread/thread/thr_sig.c +++ b/lib/libpthread/thread/thr_sig.c @@ -1,1250 +1,1259 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "thr_private.h" /* Prototypes: */ static inline void build_siginfo(siginfo_t *info, int signo); #ifndef SYSTEM_SCOPE_ONLY static struct pthread *thr_sig_find(struct kse *curkse, int sig, siginfo_t *info); #endif static inline void thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); static inline void thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf); #define SA_KILL 0x01 /* terminates process by default */ #define SA_STOP 0x02 #define SA_CONT 0x04 static int sigproptbl[NSIG] = { SA_KILL, /* SIGHUP */ SA_KILL, /* SIGINT */ SA_KILL, /* SIGQUIT */ SA_KILL, /* SIGILL */ SA_KILL, /* SIGTRAP */ SA_KILL, /* SIGABRT */ SA_KILL, /* SIGEMT */ SA_KILL, /* SIGFPE */ SA_KILL, /* SIGKILL */ SA_KILL, /* SIGBUS */ SA_KILL, /* SIGSEGV */ SA_KILL, /* SIGSYS */ SA_KILL, /* SIGPIPE */ SA_KILL, /* SIGALRM */ SA_KILL, /* SIGTERM */ 0, /* SIGURG */ SA_STOP, /* SIGSTOP */ SA_STOP, /* SIGTSTP */ SA_CONT, /* SIGCONT */ 0, /* SIGCHLD */ SA_STOP, /* SIGTTIN */ SA_STOP, /* SIGTTOU */ 0, /* SIGIO */ SA_KILL, /* SIGXCPU */ SA_KILL, /* SIGXFSZ */ SA_KILL, /* SIGVTALRM */ SA_KILL, /* SIGPROF */ 0, /* SIGWINCH */ 0, /* SIGINFO */ SA_KILL, /* SIGUSR1 */ SA_KILL /* SIGUSR2 */ }; /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif +static __inline int +_thr_dump_enabled(void) +{ + return ((_thr_debug_flags & DBG_INFO_DUMP) != 0); +} + /* * Signal setup and delivery. * * 1) Delivering signals to threads in the same KSE. * These signals are sent by upcall events and are set in the * km_sigscaught field of the KSE mailbox. Since these signals * are received while operating on the KSE stack, they can be * delivered either by using signalcontext() to add a stack frame * to the target thread's stack, or by adding them in the thread's * pending set and having the thread run them down after it * 2) Delivering signals to threads in other KSEs/KSEGs. * 3) Delivering signals to threads in critical regions. * 4) Delivering signals to threads after they change their signal masks. * * Methods of delivering signals. * * 1) Add a signal frame to the thread's saved context. * 2) Add the signal to the thread structure, mark the thread as * having signals to handle, and let the thread run them down * after it resumes from the KSE scheduler. * * Problem with 1). You can't do this to a running thread or a * thread in a critical region. * * Problem with 2). You can't do this to a thread that doesn't * yield in some way (explicitly enters the scheduler). A thread * blocked in the kernel or a CPU hungry thread will not see the * signal without entering the scheduler. * * The solution is to use both 1) and 2) to deliver signals: * * o Thread in critical region - use 2). When the thread * leaves the critical region it will check to see if it * has pending signals and run them down. * * o Thread enters scheduler explicitly - use 2). The thread * can check for pending signals after it returns from the * the scheduler. * * o Thread is running and not current thread - use 2). When the * thread hits a condition specified by one of the other bullets, * the signal will be delivered. 
* * o Thread is running and is current thread (e.g., the thread * has just changed its signal mask and now sees that it has * pending signals) - just run down the pending signals. * * o Thread is swapped out due to quantum expiration - use 1) * * o Thread is blocked in kernel - kse_thr_wakeup() and then * use 1) */ /* * Rules for selecting threads for signals received: * * 1) If the signal is a sychronous signal, it is delivered to * the generating (current thread). If the thread has the * signal masked, it is added to the threads pending signal * set until the thread unmasks it. * * 2) A thread in sigwait() where the signal is in the thread's * waitset. * * 3) A thread in sigsuspend() where the signal is not in the * thread's suspended signal mask. * * 4) Any thread (first found/easiest to deliver) that has the * signal unmasked. */ #ifndef SYSTEM_SCOPE_ONLY static void * sig_daemon(void *arg /* Unused */) { int i; kse_critical_t crit; struct timespec ts; sigset_t set; struct kse *curkse; struct pthread *curthread = _get_curthread(); DBG_MSG("signal daemon started(%p)\n", curthread); curthread->name = strdup("signal thread"); crit = _kse_critical_enter(); curkse = _get_curkse(); /* * Daemon thread is a bound thread and we must be created with * all signals masked */ #if 0 SIGFILLSET(set); __sys_sigprocmask(SIG_SETMASK, &set, NULL); #endif __sys_sigpending(&set); ts.tv_sec = 0; ts.tv_nsec = 0; while (1) { KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); _thr_proc_sigpending = set; KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); for (i = 1; i <= _SIG_MAXSIG; i++) { if (SIGISMEMBER(set, i) != 0) _thr_sig_dispatch(curkse, i, NULL /* no siginfo */); } ts.tv_sec = 30; ts.tv_nsec = 0; curkse->k_kcb->kcb_kmbx.km_flags = KMF_NOUPCALL | KMF_NOCOMPLETED | KMF_WAITSIGEVENT; kse_release(&ts); curkse->k_kcb->kcb_kmbx.km_flags = 0; set = curkse->k_kcb->kcb_kmbx.km_sigscaught; } return (0); } /* Utility function to create signal daemon thread */ int _thr_start_sig_daemon(void) { 
pthread_attr_t attr; sigset_t sigset, oldset; SIGFILLSET(sigset); pthread_sigmask(SIG_SETMASK, &sigset, &oldset); pthread_attr_init(&attr); pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); attr->flags |= THR_SIGNAL_THREAD; /* sigmask will be inherited */ if (pthread_create(&_thr_sig_daemon, &attr, sig_daemon, NULL)) PANIC("can not create signal daemon thread!\n"); pthread_attr_destroy(&attr); pthread_sigmask(SIG_SETMASK, &oldset, NULL); return (0); } /* * This signal handler only delivers asynchronous signals. * This must be called with upcalls disabled and without * holding any locks. */ void _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info) { struct kse_mailbox *kmbx; struct pthread *thread; DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig); /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) { + if (_thr_dump_enabled() && (sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } while ((thread = thr_sig_find(curkse, sig, info)) != NULL) { /* * Setup the target thread to receive the signal: */ DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread); KSE_SCHED_LOCK(curkse, thread->kseg); if ((thread->state == PS_DEAD) || (thread->state == PS_DEADLOCK) || THR_IS_EXITING(thread) || THR_IS_SUSPENDED(thread)) { KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); } else if (SIGISMEMBER(thread->sigmask, sig)) { KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); } else { kmbx = _thr_sig_add(thread, sig, info); KSE_SCHED_UNLOCK(curkse, thread->kseg); _thr_ref_delete(NULL, thread); if (kmbx != NULL) kse_wakeup(kmbx); break; } } DBG_MSG("<<< _thr_sig_dispatch\n"); } #endif /* ! 
SYSTEM_SCOPE_ONLY */ static __inline int sigprop(int sig) { if (sig > 0 && sig < NSIG) return (sigproptbl[_SIG_IDX(sig)]); return (0); } typedef void (*ohandler)(int sig, int code, struct sigcontext *scp, char *addr, __sighandler_t *catcher); void _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread_sigframe psf; __siginfohandler_t *sigfunc; struct pthread *curthread; struct kse *curkse; struct sigaction act; int sa_flags, err_save; err_save = errno; DBG_MSG(">>> _thr_sig_handler(%d)\n", sig); curthread = _get_curthread(); if (curthread == NULL) PANIC("No current thread.\n"); if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) PANIC("Thread is not system scope.\n"); if (curthread->flags & THR_FLAGS_EXITING) { errno = err_save; return; } curkse = _get_curkse(); /* * If thread is in critical region or if thread is on * the way of state transition, then latch signal into buffer. */ if (_kse_in_critical() || THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING) { DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig); curthread->siginfo[sig-1] = *info; curthread->check_pending = 1; curkse->k_sigseqno++; SIGADDSET(curthread->sigpend, sig); /* * If the kse is on the way to idle itself, but * we have signal ready, we should prevent it * to sleep, kernel will latch the wakeup request, * so kse_release will return from kernel immediately. 
*/ if (KSE_IS_IDLE(curkse)) kse_wakeup(&curkse->k_kcb->kcb_kmbx); errno = err_save; return; } /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) { + if (_thr_dump_enabled() && (sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } /* Check the threads previous state: */ curthread->critical_count++; if (curthread->sigbackout != NULL) curthread->sigbackout((void *)curthread); curthread->critical_count--; thr_sigframe_save(curthread, &psf); THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); _kse_critical_enter(); /* Get a fresh copy of signal mask */ __sys_sigprocmask(SIG_BLOCK, NULL, &curthread->sigmask); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); sigfunc = _thread_sigact[sig - 1].sa_sigaction; sa_flags = _thread_sigact[sig - 1].sa_flags; if (sa_flags & SA_RESETHAND) { act.sa_handler = SIG_DFL; act.sa_flags = SA_RESTART; SIGEMPTYSET(act.sa_mask); __sys_sigaction(sig, &act, NULL); __sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]); } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* Now invoke real handler */ if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN) && (sigfunc != (__siginfohandler_t *)_thr_sig_handler)) { if ((sa_flags & SA_SIGINFO) != 0 || info == NULL) (*(sigfunc))(sig, info, ucp); else { ((ohandler)(*sigfunc))( sig, info->si_code, (struct sigcontext *)ucp, info->si_addr, (__sighandler_t *)sigfunc); } } else { if ((__sighandler_t *)sigfunc == SIG_DFL) { if (sigprop(sig) & SA_KILL) { if (_kse_isthreaded()) kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); else kill(getpid(), sig); } #ifdef NOTYET else if (sigprop(sig) & SA_STOP) kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig); #endif } } _kse_critical_enter(); curthread->sigmask = ucp->uc_sigmask; SIG_CANTMASK(curthread->sigmask); _kse_critical_leave(&curthread->tcb->tcb_tmbx); thr_sigframe_restore(curthread, &psf); DBG_MSG("<<< 
_thr_sig_handler(%d)\n", sig); errno = err_save; } struct sighandle_info { __siginfohandler_t *sigfunc; int sa_flags; int sig; siginfo_t *info; ucontext_t *ucp; }; static void handle_signal(struct pthread *curthread, struct sighandle_info *shi); static void handle_signal_altstack(struct pthread *curthread, struct sighandle_info *shi); /* Must be called with signal lock and schedule lock held in order */ static void thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info, ucontext_t *ucp) { __siginfohandler_t *sigfunc; sigset_t sigmask; int sa_flags; int onstack; struct sigaction act; struct kse *curkse; struct sighandle_info shi; /* * Invoke the signal handler without going through the scheduler: */ DBG_MSG("Got signal %d, calling handler for current thread %p\n", sig, curthread); if (!_kse_in_critical()) PANIC("thr_sig_invoke_handler without in critical\n"); curkse = curthread->kse; /* * Check that a custom handler is installed and if * the signal is not blocked: */ sigfunc = _thread_sigact[sig - 1].sa_sigaction; sa_flags = _thread_sigact[sig - 1].sa_flags; sigmask = curthread->sigmask; SIGSETOR(curthread->sigmask, _thread_sigact[sig - 1].sa_mask); if (!(sa_flags & (SA_NODEFER | SA_RESETHAND))) SIGADDSET(curthread->sigmask, sig); if ((sig != SIGILL) && (sa_flags & SA_RESETHAND)) { act.sa_handler = SIG_DFL; act.sa_flags = SA_RESTART; SIGEMPTYSET(act.sa_mask); __sys_sigaction(sig, &act, NULL); __sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]); } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * We are processing buffered signals, synchronize working * signal mask into kernel. */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); onstack = _thr_sigonstack(&sigfunc); ucp->uc_stack = curthread->sigstk; ucp->uc_stack.ss_flags = (curthread->sigstk.ss_flags & SS_DISABLE) ? SS_DISABLE : ((onstack) ? 
SS_ONSTACK : 0); if (curthread->oldsigmask) { ucp->uc_sigmask = *(curthread->oldsigmask); curthread->oldsigmask = NULL; } else ucp->uc_sigmask = sigmask; shi.sigfunc = sigfunc; shi.sig = sig; shi.sa_flags = sa_flags; shi.info = info; shi.ucp = ucp; if ((curthread->sigstk.ss_flags & SS_DISABLE) == 0) { /* Deliver signal on alternative stack */ if (sa_flags & SA_ONSTACK && !onstack) handle_signal_altstack(curthread, &shi); else handle_signal(curthread, &shi); } else { handle_signal(curthread, &shi); } _kse_critical_enter(); /* Don't trust after critical leave/enter */ curkse = curthread->kse; /* * Restore the thread's signal mask. */ curthread->sigmask = ucp->uc_sigmask; SIG_CANTMASK(curthread->sigmask); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) __sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL); KSE_SCHED_LOCK(curkse, curkse->k_kseg); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); DBG_MSG("Got signal %d, handler returned %p\n", sig, curthread); } static void handle_signal(struct pthread *curthread, struct sighandle_info *shi) { _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* Check if the signal requires a dump of thread information: */ - if (shi->sig == SIGINFO) { + if (_thr_dump_enabled() && (shi->sig == SIGINFO)) { /* Dump thread information to file: */ _thread_dump_info(); } if (((__sighandler_t *)shi->sigfunc != SIG_DFL) && ((__sighandler_t *)shi->sigfunc != SIG_IGN)) { if ((shi->sa_flags & SA_SIGINFO) != 0 || shi->info == NULL) (*(shi->sigfunc))(shi->sig, shi->info, shi->ucp); else { ((ohandler)(*shi->sigfunc))( shi->sig, shi->info->si_code, (struct sigcontext *)shi->ucp, shi->info->si_addr, (__sighandler_t *)shi->sigfunc); } } else { if ((__sighandler_t *)shi->sigfunc == SIG_DFL) { if (sigprop(shi->sig) & SA_KILL) { if (_kse_isthreaded()) kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, shi->sig); else kill(getpid(), shi->sig); } #ifdef NOTYET else if (sigprop(shi->sig) & SA_STOP) kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, shi->sig); #endif } } } 
static void handle_signal_wrapper(struct pthread *curthread, ucontext_t *ret_uc, struct sighandle_info *shi) { shi->ucp->uc_stack.ss_flags = SS_ONSTACK; handle_signal(curthread, shi); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) setcontext(ret_uc); else { /* Work around for ia64, THR_SETCONTEXT does not work */ _kse_critical_enter(); curthread->tcb->tcb_tmbx.tm_context = *ret_uc; _thread_switch(curthread->kse->k_kcb, curthread->tcb, 1); /* THR_SETCONTEXT */ } } /* * Jump to stack set by sigaltstack before invoking signal handler */ static void handle_signal_altstack(struct pthread *curthread, struct sighandle_info *shi) { volatile int once; ucontext_t uc1, *uc2; THR_ASSERT(_kse_in_critical(), "Not in critical"); once = 0; THR_GETCONTEXT(&uc1); if (once == 0) { once = 1; /* XXX * We are still in critical region, it is safe to operate thread * context */ uc2 = &curthread->tcb->tcb_tmbx.tm_context; uc2->uc_stack = curthread->sigstk; makecontext(uc2, (void (*)(void))handle_signal_wrapper, 3, curthread, &uc1, shi); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) setcontext(uc2); else { _thread_switch(curthread->kse->k_kcb, curthread->tcb, 1); /* THR_SETCONTEXT(uc2); */ } } } int _thr_getprocsig(int sig, siginfo_t *siginfo) { kse_critical_t crit; struct kse *curkse; int ret; DBG_MSG(">>> _thr_getprocsig\n"); crit = _kse_critical_enter(); curkse = _get_curkse(); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); ret = _thr_getprocsig_unlocked(sig, siginfo); KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); _kse_critical_leave(crit); DBG_MSG("<<< _thr_getprocsig\n"); return (ret); } int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo) { sigset_t sigset; struct timespec ts; /* try to retrieve signal from kernel */ SIGEMPTYSET(sigset); SIGADDSET(sigset, sig); ts.tv_sec = 0; ts.tv_nsec = 0; SIGDELSET(_thr_proc_sigpending, sig); if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0) return (sig); return (0); } #ifndef SYSTEM_SCOPE_ONLY /* * Find a thread that can handle the 
signal. This must be called * with upcalls disabled. */ struct pthread * thr_sig_find(struct kse *curkse, int sig, siginfo_t *info) { struct kse_mailbox *kmbx = NULL; struct pthread *pthread; struct pthread *suspended_thread, *signaled_thread; __siginfohandler_t *sigfunc; siginfo_t si; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; signaled_thread = NULL; KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); TAILQ_FOREACH(pthread, &_thread_list, tle) { if (pthread == _thr_sig_daemon) continue; /* Signal delivering to bound thread is done by kernel */ if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) continue; /* Take the scheduling lock. */ KSE_SCHED_LOCK(curkse, pthread->kseg); if ((pthread->state == PS_DEAD) || (pthread->state == PS_DEADLOCK) || THR_IS_EXITING(pthread) || THR_IS_SUSPENDED(pthread)) { ; /* Skip this thread. */ } else if (pthread->state == PS_SIGWAIT && SIGISMEMBER(*(pthread->data.sigwait->waitset), sig)) { /* * retrieve signal from kernel, if it is job control * signal, and sigaction is SIG_DFL, then we will * be stopped in kernel, we hold lock here, but that * does not matter, because that's job control, and * whole process should be stopped. */ if (_thr_getprocsig(sig, &si)) { DBG_MSG("Waking thread %p in sigwait" " with signal %d\n", pthread, sig); /* where to put siginfo ? 
*/ *(pthread->data.sigwait->siginfo) = si; kmbx = _thr_setrunnable_unlocked(pthread); } KSE_SCHED_UNLOCK(curkse, pthread->kseg); /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ KSE_LOCK_RELEASE(curkse, &_thread_list_lock); if (kmbx != NULL) kse_wakeup(kmbx); if (suspended_thread != NULL) _thr_ref_delete(NULL, suspended_thread); if (signaled_thread != NULL) _thr_ref_delete(NULL, signaled_thread); return (NULL); } else if (!SIGISMEMBER(pthread->sigmask, sig)) { /* * If debugger is running, we don't quick exit, * and give it a chance to check the signal. */ if (_libkse_debug == 0) { sigfunc = _thread_sigact[sig - 1].sa_sigaction; if ((__sighandler_t *)sigfunc == SIG_DFL) { if (sigprop(sig) & SA_KILL) { kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); /* Never reach */ } } } if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) { suspended_thread = pthread; suspended_thread->refcount++; } } else if (signaled_thread == NULL) { signaled_thread = pthread; signaled_thread->refcount++; } } KSE_SCHED_UNLOCK(curkse, pthread->kseg); } KSE_LOCK_RELEASE(curkse, &_thread_list_lock); if (suspended_thread != NULL) { pthread = suspended_thread; if (signaled_thread) _thr_ref_delete(NULL, signaled_thread); } else if (signaled_thread) { pthread = signaled_thread; } else { pthread = NULL; } return (pthread); } #endif /* ! SYSTEM_SCOPE_ONLY */ static inline void build_siginfo(siginfo_t *info, int signo) { bzero(info, sizeof(*info)); info->si_signo = signo; info->si_pid = _thr_pid; } /* * This is called by a thread when it has pending signals to deliver. * It should only be called from the context of the thread. 
*/ void _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp) { struct pthread_sigframe psf; siginfo_t siginfo; int i, err_save; kse_critical_t crit; struct kse *curkse; sigset_t sigmask; err_save = errno; DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread); /* Check the threads previous state: */ curthread->critical_count++; if (curthread->sigbackout != NULL) curthread->sigbackout((void *)curthread); curthread->critical_count--; THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); THR_ASSERT((curthread->state == PS_RUNNING), "state is not PS_RUNNING"); thr_sigframe_save(curthread, &psf); /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ crit = _kse_critical_enter(); curkse = curthread->kse; KSE_SCHED_LOCK(curkse, curkse->k_kseg); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); curthread->active_priority &= ~THR_SIGNAL_PRIORITY; SIGFILLSET(sigmask); while (1) { /* * For bound thread, we mask all signals and get a fresh * copy of signal mask from kernel */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { __sys_sigprocmask(SIG_SETMASK, &sigmask, &curthread->sigmask); } for (i = 1; i <= _SIG_MAXSIG; i++) { if (SIGISMEMBER(curthread->sigmask, i)) continue; if (SIGISMEMBER(curthread->sigpend, i)) { SIGDELSET(curthread->sigpend, i); siginfo = curthread->siginfo[i-1]; break; } if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && SIGISMEMBER(_thr_proc_sigpending, i)) { if (_thr_getprocsig_unlocked(i, &siginfo)) break; } } if (i <= _SIG_MAXSIG) thr_sig_invoke_handler(curthread, i, &siginfo, ucp); else { if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); } break; } } /* Don't trust after signal handling */ curkse = curthread->kse; KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* repost masked signal to kernel, it hardly happens in real world */ if 
((curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && !SIGISEMPTY(curthread->sigpend)) { /* dirty read */ __sys_sigprocmask(SIG_SETMASK, &sigmask, &curthread->sigmask); for (i = 1; i <= _SIG_MAXSIG; ++i) { if (SIGISMEMBER(curthread->sigpend, i)) { SIGDELSET(curthread->sigpend, i); if (!_kse_isthreaded()) kill(getpid(), i); else kse_thr_interrupt( &curthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG, i); } } __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); } DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread); thr_sigframe_restore(curthread, &psf); errno = err_save; } /* * This checks pending signals for the current thread. It should be * called whenever a thread changes its signal mask. Note that this * is called from a thread (using its stack). * * XXX - We might want to just check to see if there are pending * signals for the thread here, but enter the UTS scheduler * to actually install the signal handler(s). */ void _thr_sig_check_pending(struct pthread *curthread) { ucontext_t uc; volatile int once; int errsave; /* * If the thread is in critical region, delay processing signals. * If the thread state is not PS_RUNNING, it might be switching * into UTS and but a THR_LOCK_RELEASE saw check_pending, and it * goes here, in the case we delay processing signals, lets UTS * process complicated things, normally UTS will call _thr_sig_add * to resume the thread, so we needn't repeat doing it here. */ if (THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING) return; errsave = errno; once = 0; THR_GETCONTEXT(&uc); if (once == 0) { once = 1; curthread->check_pending = 0; _thr_sig_rundown(curthread, &uc); } errno = errsave; } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. * * This must be called with the thread's scheduling lock held. 
 */
/*
 * _thr_sig_add -- deliver signal "sig" (optionally with siginfo "info")
 * to thread "pthread".
 *
 * Called with the target thread's scheduling queue lock held (see
 * THR_SCHED_LOCK in _thr_sig_send() below).  The signal is either
 * marked pending on the thread or arranged to be handled immediately;
 * the return value is the KSE mailbox the caller must wake via
 * kse_wakeup() after dropping the scheduling lock, or NULL when no
 * wakeup is needed.
 */
struct kse_mailbox *
_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
	siginfo_t siginfo;	/* scratch copy for a process-wide signal */
	struct kse *curkse;	/* NOTE(review): set below but otherwise unused */
	struct kse_mailbox *kmbx = NULL;	/* mailbox to wake, if any */
	struct pthread *curthread = _get_curthread();
	int restart;		/* non-zero if SA_RESTART is set for sig */
	int suppress_handler = 0;	/* 1 => do not run the handler now */
	int fromproc = 0;	/* 1 => forwarded from the process queue */
	__sighandler_t *sigfunc;

	DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig);

	curkse = _get_curkse();
	restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
	sigfunc = _thread_sigact[sig - 1].sa_handler;
	/* The signal daemon thread forwards process-wide signals. */
	fromproc = (curthread == _thr_sig_daemon);

	/* Dead or invalid-state threads accept no further signals. */
	if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK ||
	    pthread->state == PS_STATE_MAX)
		return (NULL); /* return false */
	/* Bound (system scope) threads must be signalled via the kernel. */
	if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
	    (curthread != pthread)) {
		PANIC("Please use _thr_send_sig for bound thread");
		return (NULL);
	}

	if (pthread->state != PS_SIGWAIT &&
	    SIGISMEMBER(pthread->sigmask, sig)) {
		/* Signal is masked; just mark it pending on the thread. */
		if (!fromproc) {
			SIGADDSET(pthread->sigpend, sig);
			if (info == NULL)
				build_siginfo(&pthread->siginfo[sig-1], sig);
			else if (info != &pthread->siginfo[sig-1])
				memcpy(&pthread->siginfo[sig-1], info,
				    sizeof(*info));
		} else {
			/* Claim the process signal; it may already be gone. */
			if (!_thr_getprocsig(sig, &pthread->siginfo[sig-1]))
				return (NULL);
			SIGADDSET(pthread->sigpend, sig);
		}
	}
	else {
		/* If the process-wide signal no longer exists, just return. */
		if (fromproc) {
			if (!_thr_getprocsig(sig, &siginfo))
				return (NULL);
			info = &siginfo;
		}

		/* Default disposition that kills the process: exit now. */
		if (pthread->state != PS_SIGWAIT && sigfunc == SIG_DFL &&
		    (sigprop(sig) & SA_KILL)) {
			kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
			/* Never reach */
		}

		/*
		 * Process according to thread state:
		 */
		switch (pthread->state) {
		case PS_DEAD:
		case PS_DEADLOCK:
		case PS_STATE_MAX:
			return (NULL); /* XXX return false */
		case PS_LOCKWAIT:
		case PS_SUSPENDED:
			/*
			 * You can't call a signal handler for threads in these
			 * states.
			 */
			suppress_handler = 1;
			break;
		case PS_RUNNING:
			/* Boost priority so the handler runs promptly. */
			if ((pthread->flags & THR_FLAGS_IN_RUNQ)) {
				THR_RUNQ_REMOVE(pthread);
				pthread->active_priority |= THR_SIGNAL_PRIORITY;
				THR_RUNQ_INSERT_TAIL(pthread);
			} else {
				/* Possible not in RUNQ and has curframe ? */
				pthread->active_priority |= THR_SIGNAL_PRIORITY;
			}
			break;
		/*
		 * States which cannot be interrupted but still require the
		 * signal handler to run:
		 */
		case PS_COND_WAIT:
		case PS_MUTEX_WAIT:
			break;
		case PS_SLEEP_WAIT:
			/*
			 * Unmasked signals always cause sleep to terminate
			 * early regardless of SA_RESTART:
			 */
			pthread->interrupted = 1;
			break;
		case PS_JOIN:
			break;
		case PS_SIGSUSPEND:
			pthread->interrupted = 1;
			break;
		case PS_SIGWAIT:
			/* Record the siginfo for the sigwait()er. */
			if (info == NULL)
				build_siginfo(&pthread->siginfo[sig-1], sig);
			else if (info != &pthread->siginfo[sig-1])
				memcpy(&pthread->siginfo[sig-1], info,
				    sizeof(*info));
			/*
			 * The signal handler is not called for threads in
			 * SIGWAIT.
			 */
			suppress_handler = 1;
			/* Wake up the thread if the signal is not blocked. */
			if (SIGISMEMBER(*(pthread->data.sigwait->waitset),
			    sig)) {
				/* Return the signal number: */
				*(pthread->data.sigwait->siginfo) =
				    pthread->siginfo[sig-1];
				/* Make the thread runnable: */
				kmbx = _thr_setrunnable_unlocked(pthread);
			} else {
				/* Increment the pending signal count. */
				SIGADDSET(pthread->sigpend, sig);
				if (!SIGISMEMBER(pthread->sigmask, sig)) {
					/* Fatal default action: exit. */
					if (sigfunc == SIG_DFL &&
					    sigprop(sig) & SA_KILL) {
						kse_thr_interrupt(NULL,
						    KSE_INTR_SIGEXIT, sig);
						/* Never reach */
					}
					pthread->check_pending = 1;
					pthread->interrupted = 1;
					kmbx = _thr_setrunnable_unlocked(
					    pthread);
				}
			}
			return (kmbx);
		}

		/* Common path: queue the signal and its siginfo. */
		SIGADDSET(pthread->sigpend, sig);
		if (info == NULL)
			build_siginfo(&pthread->siginfo[sig-1], sig);
		else if (info != &pthread->siginfo[sig-1])
			memcpy(&pthread->siginfo[sig-1], info, sizeof(*info));
		pthread->check_pending = 1;
		/* Kick a thread blocked in the kernel so it rechecks. */
		if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
		    (pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
			kse_thr_interrupt(&pthread->tcb->tcb_tmbx, restart ?
			    KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
		if (suppress_handler == 0) {
			/*
			 * Setup a signal frame and save the current threads
			 * state:
			 */
			if (pthread->state != PS_RUNNING) {
				if (pthread->flags & THR_FLAGS_IN_RUNQ)
					THR_RUNQ_REMOVE(pthread);
				pthread->active_priority |=
				    THR_SIGNAL_PRIORITY;
				kmbx = _thr_setrunnable_unlocked(pthread);
			}
		}
	}
	return (kmbx);
}

/*
 * Send a signal to a specific thread (ala pthread_kill):
 */
void
_thr_sig_send(struct pthread *pthread, int sig)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx;

	/* Bound threads are signalled directly through the kernel. */
	if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
		kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG,
		    sig);
		return;
	}

	/* Lock the scheduling queue of the target thread. */
	THR_SCHED_LOCK(curthread, pthread);
	if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
		kmbx = _thr_sig_add(pthread, sig, NULL);
		/* Add a preemption point. */
		if (kmbx == NULL && (curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Wake the KSE only after dropping the scheduling lock. */
		if (kmbx != NULL)
			kse_wakeup(kmbx);
		/* XXX
		 * If thread sent signal to itself, check signals now.
		 * It is not really needed, _kse_critical_leave should
		 * have already checked signals.
		 */
		if (pthread == curthread && curthread->check_pending)
			_thr_sig_check_pending(curthread);
	} else {
		/* Handler is SIG_IGN: nothing to deliver. */
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}

/*
 * Restore the scheduler-visible wait state of "curthread" from the
 * saved signal frame "psf" (the inverse of thr_sigframe_save() below).
 * Locking, as written: the thread lock covers cancelflags; a KSE
 * critical section plus the KSE group scheduling lock cover the rest.
 */
static inline void
thr_sigframe_restore(struct pthread *curthread, struct pthread_sigframe *psf)
{
	kse_critical_t crit;
	struct kse *curkse;

	THR_THREAD_LOCK(curthread, curthread);
	curthread->cancelflags = psf->psf_cancelflags;
	crit = _kse_critical_enter();
	curkse = curthread->kse;
	KSE_SCHED_LOCK(curkse, curthread->kseg);
	curthread->flags = psf->psf_flags;
	curthread->interrupted = psf->psf_interrupted;
	curthread->timeout = psf->psf_timeout;
	curthread->data = psf->psf_wait_data;
	curthread->wakeup_time = psf->psf_wakeup_time;
	curthread->continuation = psf->psf_continuation;
	KSE_SCHED_UNLOCK(curkse, curthread->kseg);
	_kse_critical_leave(crit);
	THR_THREAD_UNLOCK(curthread, curthread);
}

/*
 * Capture the scheduler-visible wait state of "curthread" into "psf"
 * (presumably before a signal handler runs -- the caller is outside
 * this view); thr_sigframe_restore() undoes this.  Same lock ordering
 * as the restore side.
 */
static inline void
thr_sigframe_save(struct pthread *curthread, struct pthread_sigframe *psf)
{
	kse_critical_t crit;
	struct kse *curkse;

	THR_THREAD_LOCK(curthread, curthread);
	psf->psf_cancelflags = curthread->cancelflags;
	crit = _kse_critical_enter();
	curkse = curthread->kse;
	KSE_SCHED_LOCK(curkse, curthread->kseg);
	/* This has to initialize all members of the sigframe.
	 */
	psf->psf_flags = (curthread->flags &
	    (THR_FLAGS_PRIVATE | THR_FLAGS_EXITING));
	psf->psf_interrupted = curthread->interrupted;
	psf->psf_timeout = curthread->timeout;
	psf->psf_wait_data = curthread->data;
	psf->psf_wakeup_time = curthread->wakeup_time;
	psf->psf_continuation = curthread->continuation;
	KSE_SCHED_UNLOCK(curkse, curthread->kseg);
	_kse_critical_leave(crit);
	THR_THREAD_UNLOCK(curthread, curthread);
}

/*
 * Initialize the library's signal handling: block all signals (saving
 * the initial thread's mask), record every signal's existing
 * disposition in _thread_sigact[], and interpose the _thr_sig_handler
 * wrapper (with SA_SIGINFO forced on) for each signal that already has
 * a user handler installed.
 */
void
_thr_signal_init(void)
{
	struct sigaction act;
	__siginfohandler_t *sigfunc;
	int i;
	sigset_t sigset;

	/* Block everything while dispositions are being (re)installed. */
	SIGFILLSET(sigset);
	__sys_sigprocmask(SIG_SETMASK, &sigset, &_thr_initial->sigmask);
	/* Enter a loop to get the existing signal status: */
	for (i = 1; i <= _SIG_MAXSIG; i++) {
		/* Get the signal handler details: */
		if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) {
			/*
			 * Abort this process if signal
			 * initialisation fails:
			 */
			PANIC("Cannot read signal handler info");
		}
		/* Install the wrapper if a handler was set. */
		sigfunc = _thread_sigact[i - 1].sa_sigaction;
		if (((__sighandler_t *)sigfunc) != SIG_DFL &&
		    ((__sighandler_t *)sigfunc) != SIG_IGN) {
			act = _thread_sigact[i - 1];
			act.sa_flags |= SA_SIGINFO;
			act.sa_sigaction =
			    (__siginfohandler_t *)_thr_sig_handler;
			__sys_sigaction(i, &act, NULL);
		}
	}
	/*
	 * NOTE(review): the remainder of this function is an unresolved
	 * unified-diff hunk ('-'/'+' prefixed lines) embedded in the
	 * source; it is preserved verbatim below.  The post-patch code
	 * installs the SIGINFO debug handler only when
	 * _thr_dump_enabled().
	 */
-	/*
-	 * Install the signal handler for SIGINFO. It isn't
-	 * really needed, but it is nice to have for debugging
-	 * purposes.
-	 */
-	_thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART;
-	SIGEMPTYSET(act.sa_mask);
-	act.sa_flags = SA_SIGINFO | SA_RESTART;
-	act.sa_sigaction = (__siginfohandler_t *)&_thr_sig_handler;
-	if (__sys_sigaction(SIGINFO, &act, NULL) != 0) {
-		__sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL);
+	if (_thr_dump_enabled()) {
		/*
-		 * Abort this process if signal initialisation fails:
+		 * Install the signal handler for SIGINFO. It isn't
+		 * really needed, but it is nice to have for debugging
+		 * purposes.
		 */
	/*
	 * NOTE(review): unresolved unified-diff hunk ('-'/'+' prefixed
	 * lines) continues below; preserved verbatim.
	 */
-		PANIC("Cannot initialize signal handler");
+		_thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART;
+		SIGEMPTYSET(act.sa_mask);
+		act.sa_flags = SA_SIGINFO | SA_RESTART;
+		act.sa_sigaction = (__siginfohandler_t *)&_thr_sig_handler;
+		if (__sys_sigaction(SIGINFO, &act, NULL) != 0) {
+			__sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask,
+			    NULL);
+			/*
+			 * Abort this process if signal initialisation fails:
+			 */
+			PANIC("Cannot initialize signal handler");
+		}
	}
	/* Restore the signal mask that was saved on entry. */
	__sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL);
	/* Record the initial thread's alternate signal stack settings. */
	__sys_sigaltstack(NULL, &_thr_initial->sigstk);
}

/*
 * Undo _thr_signal_init(): clear the library's process-pending signal
 * set and write each trappable signal's saved disposition (from
 * _thread_sigact[]) back to the kernel, then reinstate the current
 * thread's alternate signal stack.
 */
void
_thr_signal_deinit(void)
{
	int i;
	struct pthread *curthread = _get_curthread();

	/* Clear process pending signals. */
	sigemptyset(&_thr_proc_sigpending);

	/* Enter a loop to get the existing signal status: */
	for (i = 1; i <= _SIG_MAXSIG; i++) {
		/* Check for signals which cannot be trapped: */
		if (i == SIGKILL || i == SIGSTOP) {
			/* Deliberately skipped: untrappable. */
		}
		/* Set the signal handler details: */
		else if (__sys_sigaction(i, &_thread_sigact[i - 1],
		    NULL) != 0) {
			/*
			 * Abort this process if signal
			 * initialisation fails:
			 */
			PANIC("Cannot set signal handler info");
		}
	}
	/* Reinstate this thread's alternate signal stack in the kernel. */
	__sys_sigaltstack(&curthread->sigstk, NULL);
}