diff --git a/lib/libthr/thread/thr_cancel.c b/lib/libthr/thread/thr_cancel.c index 943d53c39e92..e408e03dbcd4 100644 --- a/lib/libthr/thread/thr_cancel.c +++ b/lib/libthr/thread/thr_cancel.c @@ -1,186 +1,167 @@ /* * Copyright (c) 2005, David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #include "namespace.h" #include #include "un-namespace.h" #include "thr_private.h" __weak_reference(_pthread_cancel, pthread_cancel); __weak_reference(_pthread_setcancelstate, pthread_setcancelstate); __weak_reference(_pthread_setcanceltype, pthread_setcanceltype); __weak_reference(_pthread_testcancel, pthread_testcancel); static inline void testcancel(struct pthread *curthread) { if (__predict_false(SHOULD_CANCEL(curthread) && !THR_IN_CRITICAL(curthread))) _pthread_exit(PTHREAD_CANCELED); } void _thr_testcancel(struct pthread *curthread) { testcancel(curthread); } int _pthread_cancel(pthread_t pthread) { struct pthread *curthread = _get_curthread(); int ret; /* * POSIX says _pthread_cancel should be async cancellation safe. * _thr_ref_add and _thr_ref_delete will enter and leave critical * region automatically. */ if ((ret = _thr_ref_add(curthread, pthread, 0)) == 0) { THR_THREAD_LOCK(curthread, pthread); if (!pthread->cancel_pending) { pthread->cancel_pending = 1; if (pthread->cancel_enable) _thr_send_sig(pthread, SIGCANCEL); } THR_THREAD_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); } return (ret); } int _pthread_setcancelstate(int state, int *oldstate) { struct pthread *curthread = _get_curthread(); int oldval; oldval = curthread->cancel_enable; switch (state) { case PTHREAD_CANCEL_DISABLE: - THR_LOCK(curthread); curthread->cancel_enable = 0; - THR_UNLOCK(curthread); break; case PTHREAD_CANCEL_ENABLE: - THR_LOCK(curthread); curthread->cancel_enable = 1; - THR_UNLOCK(curthread); + testcancel(curthread); break; default: return (EINVAL); } if (oldstate) { *oldstate = oldval ? 
PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE; } return (0); } int _pthread_setcanceltype(int type, int *oldtype) { struct pthread *curthread = _get_curthread(); int oldval; oldval = curthread->cancel_async; switch (type) { case PTHREAD_CANCEL_ASYNCHRONOUS: curthread->cancel_async = 1; testcancel(curthread); break; case PTHREAD_CANCEL_DEFERRED: curthread->cancel_async = 0; break; default: return (EINVAL); } if (oldtype) { *oldtype = oldval ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED; } return (0); } void _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); - _thr_cancel_enter(curthread); - _thr_cancel_leave(curthread); + curthread->cancel_point = 1; + testcancel(curthread); + curthread->cancel_point = 0; } void _thr_cancel_enter(struct pthread *curthread) { - curthread->cancel_point++; - if (curthread->cancel_enable) - testcancel(curthread); + curthread->cancel_point = 1; + testcancel(curthread); } void -_thr_cancel_enter_defer(struct pthread *curthread, int maycancel) +_thr_cancel_enter2(struct pthread *curthread, int maycancel) { - curthread->cancel_defer++; - curthread->cancel_point++; + curthread->cancel_point = 1; if (__predict_false(SHOULD_CANCEL(curthread) && !THR_IN_CRITICAL(curthread))) { if (!maycancel) thr_wake(curthread->tid); else _pthread_exit(PTHREAD_CANCELED); } } void -_thr_cancel_leave(struct pthread *curthread) -{ - curthread->cancel_point--; -} - -void -_thr_cancel_leave2(struct pthread *curthread, int maycancel) -{ - if (curthread->cancel_enable && maycancel) - testcancel(curthread); - curthread->cancel_point--; -} - -void -_thr_cancel_leave_defer(struct pthread *curthread, int maycancel) +_thr_cancel_leave(struct pthread *curthread, int maycancel) { - if (curthread->cancel_enable && maycancel) + if (maycancel) testcancel(curthread); - curthread->cancel_point--; - curthread->cancel_defer--; + curthread->cancel_point = 0; } diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c index 
95970d9d0fcd..07f1b8b952cc 100644 --- a/lib/libthr/thread/thr_cond.c +++ b/lib/libthr/thread/thr_cond.c @@ -1,302 +1,302 @@ /* * Copyright (c) 2005 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" /* * Prototypes */ int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex); int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec * abstime); static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr); static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel); static int cond_signal_common(pthread_cond_t *cond, int broadcast); /* * Double underscore versions are cancellation points. Single underscore * versions are not and are provided for libc internal usage (which * shouldn't introduce cancellation points). */ __weak_reference(__pthread_cond_wait, pthread_cond_wait); __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_pthread_cond_init, pthread_cond_init); __weak_reference(_pthread_cond_destroy, pthread_cond_destroy); __weak_reference(_pthread_cond_signal, pthread_cond_signal); __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast); static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { pthread_cond_t pcond; int rval = 0; if ((pcond = (pthread_cond_t) calloc(1, sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable structure: */ if (cond_attr == NULL || *cond_attr == NULL) { pcond->c_pshared = 0; pcond->c_clockid = CLOCK_REALTIME; } else { pcond->c_pshared = (*cond_attr)->c_pshared; pcond->c_clockid = (*cond_attr)->c_clockid; } _thr_umutex_init(&pcond->c_lock); *cond = pcond; } /* Return the completion status: */ return (rval); } static int init_static(struct pthread *thread, pthread_cond_t *cond) { int ret; THR_LOCK_ACQUIRE(thread, &_cond_static_lock); if (*cond == NULL) ret = cond_init(cond, NULL); else ret = 0; THR_LOCK_RELEASE(thread, &_cond_static_lock); return (ret); } int 
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { *cond = NULL; return (cond_init(cond, cond_attr)); } int _pthread_cond_destroy(pthread_cond_t *cond) { struct pthread *curthread = _get_curthread(); struct pthread_cond *cv; int rval = 0; if (*cond == NULL) rval = EINVAL; else { cv = *cond; THR_UMUTEX_LOCK(curthread, &cv->c_lock); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); /* * Free the memory allocated for the condition * variable structure: */ free(cv); } /* Return the completion status: */ return (rval); } struct cond_cancel_info { pthread_mutex_t *mutex; pthread_cond_t *cond; int count; }; static void cond_cancel_handler(void *arg) { struct pthread *curthread = _get_curthread(); struct cond_cancel_info *info = (struct cond_cancel_info *)arg; pthread_cond_t cv; if (info->cond != NULL) { cv = *(info->cond); THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); } _mutex_cv_lock(info->mutex, info->count); } /* * Cancellation behaivor: * Thread may be canceled at start, if thread is canceled, it means it * did not get a wakeup from pthread_cond_signal(), otherwise, it is * not canceled. * Thread cancellation never cause wakeup from pthread_cond_signal() * to be lost. 
*/ static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); struct timespec ts, ts2, *tsp; struct cond_cancel_info info; pthread_cond_t cv; int ret = 0; /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ if (__predict_false(*cond == NULL && (ret = init_static(curthread, cond)) != 0)) return (ret); _thr_testcancel(curthread); cv = *cond; THR_UMUTEX_LOCK(curthread, &cv->c_lock); ret = _mutex_cv_unlock(mutex, &info.count); if (ret) { THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); return (ret); } info.mutex = mutex; info.cond = cond; if (abstime != NULL) { clock_gettime(cv->c_clockid, &ts); TIMESPEC_SUB(&ts2, abstime, &ts); tsp = &ts2; } else tsp = NULL; if (cancel) { THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info); - _thr_cancel_enter_defer(curthread, 0); + _thr_cancel_enter2(curthread, 0); ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1); info.cond = NULL; - _thr_cancel_leave_defer(curthread, (ret != 0)); + _thr_cancel_leave(curthread, (ret != 0)); THR_CLEANUP_POP(curthread, 0); } else { ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0); } if (ret == EINTR) ret = 0; _mutex_cv_lock(mutex, info.count); return (ret); } int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { return (cond_wait_common(cond, mutex, NULL, 0)); } int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { return (cond_wait_common(cond, mutex, NULL, 1)); } int _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 0)); } int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) { if (abstime == NULL || abstime->tv_sec < 0 || 
abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (cond_wait_common(cond, mutex, abstime, 1)); } static int cond_signal_common(pthread_cond_t *cond, int broadcast) { struct pthread *curthread = _get_curthread(); pthread_cond_t cv; int ret = 0; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ if (__predict_false(*cond == NULL && (ret = init_static(curthread, cond)) != 0)) return (ret); cv = *cond; THR_UMUTEX_LOCK(curthread, &cv->c_lock); if (!broadcast) ret = _thr_ucond_signal(&cv->c_kerncv); else ret = _thr_ucond_broadcast(&cv->c_kerncv); THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); return (ret); } int _pthread_cond_signal(pthread_cond_t * cond) { return (cond_signal_common(cond, 0)); } int _pthread_cond_broadcast(pthread_cond_t * cond) { return (cond_signal_common(cond, 1)); } diff --git a/lib/libthr/thread/thr_create.c b/lib/libthr/thread/thr_create.c index f73a6c9231b3..2677571b424e 100644 --- a/lib/libthr/thread/thr_create.c +++ b/lib/libthr/thread/thr_create.c @@ -1,292 +1,279 @@ /* * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 2005, David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" static int create_stack(struct pthread_attr *pattr); static void thread_start(struct pthread *curthread); __weak_reference(_pthread_create, pthread_create); int _pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { struct pthread *curthread, *new_thread; struct thr_param param; struct sched_param sched_param; struct rtprio rtp; int ret = 0, locked, create_suspended; sigset_t set, oset; cpuset_t *cpusetp = NULL; int cpusetsize = 0; _thr_check_init(); /* * Tell libc and others now they need lock to protect their data. 
*/ if (_thr_isthreaded() == 0 && _thr_setthreaded(1)) return (EAGAIN); curthread = _get_curthread(); if ((new_thread = _thr_alloc(curthread)) == NULL) return (EAGAIN); memset(&param, 0, sizeof(param)); if (attr == NULL || *attr == NULL) /* Use the default thread attributes: */ new_thread->attr = _pthread_attr_default; else { new_thread->attr = *(*attr); cpusetp = new_thread->attr.cpuset; cpusetsize = new_thread->attr.cpusetsize; new_thread->attr.cpuset = NULL; new_thread->attr.cpusetsize = 0; } if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) { /* inherit scheduling contention scope */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM; else new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM; new_thread->attr.prio = curthread->attr.prio; new_thread->attr.sched_policy = curthread->attr.sched_policy; } new_thread->tid = TID_TERMINATED; if (create_stack(&new_thread->attr) != 0) { /* Insufficient memory to create a stack: */ _thr_free(curthread, new_thread); return (EAGAIN); } /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = THR_MAGIC; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancel_enable = 1; new_thread->cancel_async = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); TAILQ_INIT(&new_thread->pp_mutexq); /* Initialise hooks in the thread structure: */ if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) { new_thread->flags = THR_FLAGS_NEED_SUSPEND; create_suspended = 1; } else { create_suspended = 0; } new_thread->state = PS_RUNNING; if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED) new_thread->tlflags |= TLFLAGS_DETACHED; - if (curthread->in_sigcancel_handler) - new_thread->unblock_sigcancel = 1; - else - new_thread->unblock_sigcancel = 0; - /* Add the new thread. */ new_thread->refcount = 1; _thr_link(curthread, new_thread); /* Return thread pointer earlier so that new thread can use it. 
*/ (*thread) = new_thread; if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) { THR_THREAD_LOCK(curthread, new_thread); locked = 1; } else locked = 0; param.start_func = (void (*)(void *)) thread_start; param.arg = new_thread; param.stack_base = new_thread->attr.stackaddr_attr; param.stack_size = new_thread->attr.stacksize_attr; param.tls_base = (char *)new_thread->tcb; param.tls_size = sizeof(struct tcb); param.child_tid = &new_thread->tid; param.parent_tid = &new_thread->tid; param.flags = 0; if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) param.flags |= THR_SYSTEM_SCOPE; if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) param.rtp = NULL; else { sched_param.sched_priority = new_thread->attr.prio; _schedparam_to_rtp(new_thread->attr.sched_policy, &sched_param, &rtp); param.rtp = &rtp; } /* Schedule the new thread. */ if (create_suspended) { SIGFILLSET(set); SIGDELSET(set, SIGTRAP); __sys_sigprocmask(SIG_SETMASK, &set, &oset); new_thread->sigmask = oset; SIGDELSET(new_thread->sigmask, SIGCANCEL); } ret = thr_new(&param, sizeof(param)); if (ret != 0) { ret = errno; /* * Translate EPROCLIM into well-known POSIX code EAGAIN. 
*/ if (ret == EPROCLIM) ret = EAGAIN; } if (create_suspended) __sys_sigprocmask(SIG_SETMASK, &oset, NULL); if (ret != 0) { if (!locked) THR_THREAD_LOCK(curthread, new_thread); new_thread->state = PS_DEAD; new_thread->tid = TID_TERMINATED; if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) { new_thread->cycle++; _thr_umtx_wake(&new_thread->cycle, INT_MAX, 0); } THR_THREAD_UNLOCK(curthread, new_thread); THREAD_LIST_LOCK(curthread); _thread_active_threads--; new_thread->tlflags |= TLFLAGS_DETACHED; _thr_ref_delete_unlocked(curthread, new_thread); THREAD_LIST_UNLOCK(curthread); } else if (locked) { if (cpusetp != NULL) { if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, TID(new_thread), cpusetsize, cpusetp)) { ret = errno; /* kill the new thread */ new_thread->force_exit = 1; THR_THREAD_UNLOCK(curthread, new_thread); goto out; } } _thr_report_creation(curthread, new_thread); THR_THREAD_UNLOCK(curthread, new_thread); out: if (ret) { THREAD_LIST_LOCK(curthread); new_thread->tlflags |= TLFLAGS_DETACHED; THR_GCLIST_ADD(new_thread); THREAD_LIST_UNLOCK(curthread); } } if (ret) (*thread) = 0; return (ret); } static int create_stack(struct pthread_attr *pattr) { int ret; /* Check if a stack was specified in the thread attributes: */ if ((pattr->stackaddr_attr) != NULL) { pattr->guardsize_attr = 0; pattr->flags |= THR_STACK_USER; ret = 0; } else ret = _thr_stack_alloc(pattr); return (ret); } static void thread_start(struct pthread *curthread) { sigset_t set; if (curthread->attr.suspend == THR_CREATE_SUSPENDED) set = curthread->sigmask; /* * This is used as a serialization point to allow parent * to report 'new thread' event to debugger or tweak new thread's * attributes before the new thread does real-world work. 
*/ THR_LOCK(curthread); THR_UNLOCK(curthread); if (curthread->force_exit) _pthread_exit(PTHREAD_CANCELED); - if (curthread->unblock_sigcancel) { - sigset_t set1; - - SIGEMPTYSET(set1); - SIGADDSET(set1, SIGCANCEL); - __sys_sigprocmask(SIG_UNBLOCK, &set1, NULL); - } - if (curthread->attr.suspend == THR_CREATE_SUSPENDED) { #if 0 /* Done in THR_UNLOCK() */ _thr_ast(curthread); #endif /* * Parent thread have stored signal mask for us, * we should restore it now. */ __sys_sigprocmask(SIG_SETMASK, &set, NULL); } /* Run the current thread's start routine with argument: */ _pthread_exit(curthread->start_routine(curthread->arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } diff --git a/lib/libthr/thread/thr_exit.c b/lib/libthr/thread/thr_exit.c index e6facd9d85ba..10581ab23958 100644 --- a/lib/libthr/thread/thr_exit.c +++ b/lib/libthr/thread/thr_exit.c @@ -1,130 +1,149 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "namespace.h" #include #include #include #include +#include +#include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" void _pthread_exit(void *status); __weak_reference(_pthread_exit, pthread_exit); void _thread_exit(const char *fname, int lineno, const char *msg) { /* Write an error message to the standard error file descriptor: */ _thread_printf(2, "Fatal error '%s' at line %d in file %s (errno = %d)\n", msg, lineno, fname, errno); abort(); } void _pthread_exit(void *status) +{ + _pthread_exit_mask(status, NULL); +} + +void +_pthread_exit_mask(void *status, sigset_t *mask) { struct pthread *curthread = _get_curthread(); /* Check if this thread is already in the process of exiting: */ if (curthread->cancelling) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called " "pthread_exit() from a destructor. POSIX 1003.1 " "1996 s16.2.5.2 does not allow this!", curthread); PANIC(msg); } /* Flag this thread as exiting. 
*/ curthread->cancelling = 1; curthread->cancel_enable = 0; curthread->cancel_async = 0; + curthread->cancel_point = 0; + if (mask != NULL) + __sys_sigprocmask(SIG_SETMASK, mask, NULL); + if (curthread->unblock_sigcancel) { + sigset_t set; + + curthread->unblock_sigcancel = 0; + SIGEMPTYSET(set); + SIGADDSET(set, SIGCANCEL); + __sys_sigprocmask(SIG_UNBLOCK, &set, NULL); + } /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { _pthread_cleanup_pop(1); } /* Check if there is thread specific data: */ if (curthread->specific != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } if (!_thr_isthreaded()) exit(0); THREAD_LIST_LOCK(curthread); _thread_active_threads--; if (_thread_active_threads == 0) { THREAD_LIST_UNLOCK(curthread); exit(0); /* Never reach! */ } THREAD_LIST_UNLOCK(curthread); /* Tell malloc that the thread is exiting. */ _malloc_thread_cleanup(); THREAD_LIST_LOCK(curthread); THR_LOCK(curthread); curthread->state = PS_DEAD; if (curthread->flags & THR_FLAGS_NEED_SUSPEND) { curthread->cycle++; _thr_umtx_wake(&curthread->cycle, INT_MAX, 0); } THR_UNLOCK(curthread); /* * Thread was created with initial refcount 1, we drop the * reference count to allow it to be garbage collected. */ curthread->refcount--; if (curthread->tlflags & TLFLAGS_DETACHED) THR_GCLIST_ADD(curthread); THREAD_LIST_UNLOCK(curthread); if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH)) _thr_report_death(curthread); /* * Kernel will do wakeup at the address, so joiner thread * will be resumed if it is sleeping at the address. */ thr_exit(&curthread->tid); PANIC("thr_exit() returned"); /* Never reach! */ } diff --git a/lib/libthr/thread/thr_fork.c b/lib/libthr/thread/thr_fork.c index f20942d42be9..8e1ea6a5e72b 100644 --- a/lib/libthr/thread/thr_fork.c +++ b/lib/libthr/thread/thr_fork.c @@ -1,234 +1,240 @@ /* * Copyright (c) 2005 David Xu * Copyright (c) 2003 Daniel Eischen * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "namespace.h" #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "rtld_lock.h" #include "thr_private.h" __weak_reference(_pthread_atfork, pthread_atfork); int _pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)) { struct pthread *curthread; struct pthread_atfork *af; _thr_check_init(); if ((af = malloc(sizeof(struct pthread_atfork))) == NULL) return (ENOMEM); curthread = _get_curthread(); af->prepare = prepare; af->parent = parent; af->child = child; THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock); TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe); THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock); return (0); } void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info) { struct pthread *curthread; struct pthread_atfork *af, *af1; _thr_check_init(); curthread = _get_curthread(); THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock); TAILQ_FOREACH_SAFE(af, &_thr_atfork_list, qe, af1) { if (__elf_phdr_match_addr(phdr_info, af->prepare) || 
__elf_phdr_match_addr(phdr_info, af->parent) || __elf_phdr_match_addr(phdr_info, af->child)) { TAILQ_REMOVE(&_thr_atfork_list, af, qe); free(af); } } THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock); _thr_tsd_unload(phdr_info); + _thr_sigact_unload(phdr_info); } __weak_reference(_fork, fork); pid_t _fork(void); pid_t _fork(void) { struct pthread *curthread; struct pthread_atfork *af; pid_t ret; int errsave; int was_threaded; int rtld_locks[MAX_RTLD_LOCKS]; if (!_thr_is_inited()) return (__sys_fork()); curthread = _get_curthread(); THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock); /* Run down atfork prepare handlers. */ TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) { if (af->prepare != NULL) af->prepare(); } /* * All bets are off as to what should happen soon if the parent * process was not so kindly as to set up pthread fork hooks to * relinquish all running threads. */ if (_thr_isthreaded() != 0) { was_threaded = 1; _malloc_prefork(); _rtld_atfork_pre(rtld_locks); } else { was_threaded = 0; } /* * Block all signals until we reach a safe point. */ _thr_signal_block(curthread); + _thr_signal_prefork(); /* Fork a new process: */ if ((ret = __sys_fork()) == 0) { /* Child process */ errsave = errno; curthread->cancel_pending = 0; curthread->flags &= ~THR_FLAGS_NEED_SUSPEND; /* * Thread list will be reinitialized, and later we call * _libpthread_init(), it will add us back to list. */ curthread->tlflags &= ~(TLFLAGS_IN_TDLIST | TLFLAGS_DETACHED); /* child is a new kernel thread. */ thr_self(&curthread->tid); /* clear other threads locked us. */ _thr_umutex_init(&curthread->lock); _thr_umutex_init(&_thr_atfork_lock); + _thr_signal_postfork_child(); + if (was_threaded) _rtld_atfork_post(rtld_locks); _thr_setthreaded(0); /* reinitialize libc spinlocks. */ _thr_spinlock_init(); _mutex_fork(curthread); /* reinitalize library. */ _libpthread_init(curthread); /* Ready to continue, unblock signals. 
*/ _thr_signal_unblock(curthread); if (was_threaded) { __isthreaded = 1; _malloc_postfork(); __isthreaded = 0; } /* Run down atfork child handlers. */ TAILQ_FOREACH(af, &_thr_atfork_list, qe) { if (af->child != NULL) af->child(); } } else { /* Parent process */ errsave = errno; + _thr_signal_postfork(); + /* Ready to continue, unblock signals. */ _thr_signal_unblock(curthread); if (was_threaded) { _rtld_atfork_post(rtld_locks); _malloc_postfork(); } /* Run down atfork parent handlers. */ TAILQ_FOREACH(af, &_thr_atfork_list, qe) { if (af->parent != NULL) af->parent(); } THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock); } errno = errsave; /* Return the process ID: */ return (ret); } diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c index 1bfdd287c203..ea567ee9958a 100644 --- a/lib/libthr/thread/thr_init.c +++ b/lib/libthr/thread/thr_init.c @@ -1,469 +1,463 @@ /* * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" char *_usrstack; struct pthread *_thr_initial; int _libthr_debug; int _thread_event_mask; struct pthread *_thread_last_event; pthreadlist _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list); pthreadlist _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list); int _thread_active_threads = 1; atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list); struct umutex _thr_atfork_lock = DEFAULT_UMUTEX; struct pthread_prio _thr_priorities[3] = { {RTP_PRIO_MIN, RTP_PRIO_MAX, 0}, /* FIFO */ {0, 0, 63}, /* OTHER */ {RTP_PRIO_MIN, RTP_PRIO_MAX, 0} /* RR */ }; struct pthread_attr _pthread_attr_default = { .sched_policy = SCHED_OTHER, .sched_inherit = PTHREAD_INHERIT_SCHED, .prio = 0, .suspend = THR_CREATE_RUNNING, .flags = PTHREAD_SCOPE_SYSTEM, .stackaddr_attr = NULL, .stacksize_attr = THR_STACK_DEFAULT, .guardsize_attr = 0, .cpusetsize = 0, .cpuset = NULL }; struct pthread_mutex_attr _pthread_mutexattr_default = { .m_type = PTHREAD_MUTEX_DEFAULT, 
.m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0 }; /* Default condition variable attributes: */ struct pthread_cond_attr _pthread_condattr_default = { .c_pshared = PTHREAD_PROCESS_PRIVATE, .c_clockid = CLOCK_REALTIME }; pid_t _thr_pid; int _thr_is_smp = 0; size_t _thr_guard_default; size_t _thr_stack_default = THR_STACK_DEFAULT; size_t _thr_stack_initial = THR_STACK_INITIAL; int _thr_page_size; int _thr_spinloops; int _thr_yieldloops; int _gc_count; struct umutex _mutex_static_lock = DEFAULT_UMUTEX; struct umutex _cond_static_lock = DEFAULT_UMUTEX; struct umutex _rwlock_static_lock = DEFAULT_UMUTEX; struct umutex _keytable_lock = DEFAULT_UMUTEX; struct umutex _thr_list_lock = DEFAULT_UMUTEX; struct umutex _thr_event_lock = DEFAULT_UMUTEX; int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); void _thread_init_hack(void) __attribute__ ((constructor)); static void init_private(void); static void init_main_thread(struct pthread *thread); /* * All weak references used within libc should be in this table. * This is so that static libraries will work. 
*/ STATIC_LIB_REQUIRE(_fork); STATIC_LIB_REQUIRE(_pthread_getspecific); STATIC_LIB_REQUIRE(_pthread_key_create); STATIC_LIB_REQUIRE(_pthread_key_delete); STATIC_LIB_REQUIRE(_pthread_mutex_destroy); STATIC_LIB_REQUIRE(_pthread_mutex_init); STATIC_LIB_REQUIRE(_pthread_mutex_lock); STATIC_LIB_REQUIRE(_pthread_mutex_trylock); STATIC_LIB_REQUIRE(_pthread_mutex_unlock); STATIC_LIB_REQUIRE(_pthread_mutexattr_init); STATIC_LIB_REQUIRE(_pthread_mutexattr_destroy); STATIC_LIB_REQUIRE(_pthread_mutexattr_settype); STATIC_LIB_REQUIRE(_pthread_once); STATIC_LIB_REQUIRE(_pthread_setspecific); STATIC_LIB_REQUIRE(_raise); STATIC_LIB_REQUIRE(_sem_destroy); STATIC_LIB_REQUIRE(_sem_getvalue); STATIC_LIB_REQUIRE(_sem_init); STATIC_LIB_REQUIRE(_sem_post); STATIC_LIB_REQUIRE(_sem_timedwait); STATIC_LIB_REQUIRE(_sem_trywait); STATIC_LIB_REQUIRE(_sem_wait); STATIC_LIB_REQUIRE(_sigaction); STATIC_LIB_REQUIRE(_sigprocmask); STATIC_LIB_REQUIRE(_sigsuspend); STATIC_LIB_REQUIRE(_sigtimedwait); STATIC_LIB_REQUIRE(_sigwait); STATIC_LIB_REQUIRE(_sigwaitinfo); STATIC_LIB_REQUIRE(_spinlock); STATIC_LIB_REQUIRE(_spinlock_debug); STATIC_LIB_REQUIRE(_spinunlock); STATIC_LIB_REQUIRE(_thread_init_hack); /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ STATIC_LIB_REQUIRE(_pthread_once); STATIC_LIB_REQUIRE(_pthread_key_create); STATIC_LIB_REQUIRE(_pthread_key_delete); STATIC_LIB_REQUIRE(_pthread_getspecific); STATIC_LIB_REQUIRE(_pthread_setspecific); STATIC_LIB_REQUIRE(_pthread_mutex_init); STATIC_LIB_REQUIRE(_pthread_mutex_destroy); STATIC_LIB_REQUIRE(_pthread_mutex_lock); STATIC_LIB_REQUIRE(_pthread_mutex_trylock); STATIC_LIB_REQUIRE(_pthread_mutex_unlock); STATIC_LIB_REQUIRE(_pthread_create); /* Pull in all symbols required by libthread_db */ STATIC_LIB_REQUIRE(_thread_state_running); #define DUAL_ENTRY(entry) \ (pthread_func_t)entry, (pthread_func_t)entry static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(_pthread_atfork)}, /* PJT_ATFORK */ {DUAL_ENTRY(_pthread_attr_destroy)}, /* PJT_ATTR_DESTROY */ {DUAL_ENTRY(_pthread_attr_getdetachstate)}, /* PJT_ATTR_GETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_getguardsize)}, /* PJT_ATTR_GETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_getinheritsched)}, /* PJT_ATTR_GETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_getschedparam)}, /* PJT_ATTR_GETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_getschedpolicy)}, /* PJT_ATTR_GETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_getscope)}, /* PJT_ATTR_GETSCOPE */ {DUAL_ENTRY(_pthread_attr_getstackaddr)}, /* PJT_ATTR_GETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_getstacksize)}, /* PJT_ATTR_GETSTACKSIZE */ {DUAL_ENTRY(_pthread_attr_init)}, /* PJT_ATTR_INIT */ {DUAL_ENTRY(_pthread_attr_setdetachstate)}, /* PJT_ATTR_SETDETACHSTATE */ {DUAL_ENTRY(_pthread_attr_setguardsize)}, /* PJT_ATTR_SETGUARDSIZE */ {DUAL_ENTRY(_pthread_attr_setinheritsched)}, /* PJT_ATTR_SETINHERITSCHED */ {DUAL_ENTRY(_pthread_attr_setschedparam)}, /* PJT_ATTR_SETSCHEDPARAM */ {DUAL_ENTRY(_pthread_attr_setschedpolicy)}, /* PJT_ATTR_SETSCHEDPOLICY */ {DUAL_ENTRY(_pthread_attr_setscope)}, /* PJT_ATTR_SETSCOPE */ {DUAL_ENTRY(_pthread_attr_setstackaddr)}, /* PJT_ATTR_SETSTACKADDR */ {DUAL_ENTRY(_pthread_attr_setstacksize)}, /* PJT_ATTR_SETSTACKSIZE */ {DUAL_ENTRY(_pthread_cancel)}, /* 
PJT_CANCEL */ {DUAL_ENTRY(_pthread_cleanup_pop)}, /* PJT_CLEANUP_POP */ {DUAL_ENTRY(_pthread_cleanup_push)}, /* PJT_CLEANUP_PUSH */ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */ {DUAL_ENTRY(_pthread_cond_timedwait)}, /* PJT_COND_TIMEDWAIT */ {(pthread_func_t)__pthread_cond_wait, (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */ {DUAL_ENTRY(_pthread_detach)}, /* PJT_DETACH */ {DUAL_ENTRY(_pthread_equal)}, /* PJT_EQUAL */ {DUAL_ENTRY(_pthread_exit)}, /* PJT_EXIT */ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */ {DUAL_ENTRY(_pthread_join)}, /* PJT_JOIN */ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/ {DUAL_ENTRY(_pthread_kill)}, /* PJT_KILL */ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ {(pthread_func_t)__pthread_mutex_lock, (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ {(pthread_func_t)__pthread_mutex_trylock, (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */ 
{DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */ {DUAL_ENTRY(_pthread_setcancelstate)}, /* PJT_SETCANCELSTATE */ {DUAL_ENTRY(_pthread_setcanceltype)}, /* PJT_SETCANCELTYPE */ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */ {DUAL_ENTRY(_pthread_sigmask)}, /* PJT_SIGMASK */ {DUAL_ENTRY(_pthread_testcancel)}, /* PJT_TESTCANCEL */ {DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */ {DUAL_ENTRY(__pthread_cleanup_push_imp)}/* PJT_CLEANUP_PUSH_IMP */ }; static int init_once = 0; /* * For the shared version of the threads library, the above is sufficient. * But for the archive version of the library, we need a little bit more. * Namely, we must arrange for this particular module to be pulled in from * the archive library at link time. To accomplish that, we define and * initialize a variable, "_thread_autoinit_dummy_decl". This variable is * referenced (as an extern) from libc/stdlib/exit.c. This will always * create a need for this module, ensuring that it is present in the * executable. */ extern int _thread_autoinit_dummy_decl; int _thread_autoinit_dummy_decl = 0; void _thread_init_hack(void) { _libpthread_init(NULL); } /* * Threaded process initialization. * * This is only called under two conditions: * * 1) Some thread routines have detected that the library hasn't yet * been initialized (_thr_initial == NULL && curthread == NULL), or * * 2) An explicit call to reinitialize after a fork (indicated * by curthread != NULL) */ void _libpthread_init(struct pthread *curthread) { int fd, first = 0; - sigset_t sigset, oldset; /* Check if this function has already been called: */ if ((_thr_initial != NULL) && (curthread == NULL)) /* Only initialize the threaded application once. */ return; /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. 
*/ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if ((_thr_pid = getpid()) == 1) { /* * Setup a new session for this process which is * assumed to be running as root. */ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); } /* Initialize pthread private data. */ init_private(); /* Set the initial thread. */ if (curthread == NULL) { first = 1; /* Create and initialize the initial thread. */ curthread = _thr_alloc(NULL); if (curthread == NULL) PANIC("Can't allocate initial thread"); init_main_thread(curthread); } /* * Add the thread to the thread list queue. */ THR_LIST_ADD(curthread); _thread_active_threads = 1; /* Setup the thread specific data */ _tcb_set(curthread->tcb); if (first) { - SIGFILLSET(sigset); - SIGDELSET(sigset, SIGTRAP); - __sys_sigprocmask(SIG_SETMASK, &sigset, &oldset); - _thr_signal_init(); _thr_initial = curthread; - SIGDELSET(oldset, SIGCANCEL); - __sys_sigprocmask(SIG_SETMASK, &oldset, NULL); + _thr_signal_init(); if (_thread_event_mask & TD_CREATE) _thr_report_creation(curthread, curthread); } } /* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { struct sched_param sched_param; /* Setup the thread attributes. */ thr_self(&thread->tid); thread->attr = _pthread_attr_default; /* * Set up the thread stack. * * Create a red zone below the main stack. 
All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap(_usrstack - _thr_stack_initial - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. */ thread->attr.stackaddr_attr = _usrstack - _thr_stack_initial; thread->attr.stacksize_attr = _thr_stack_initial; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->cancel_enable = 1; thread->cancel_async = 0; thr_set_name(thread->tid, "initial thread"); /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); TAILQ_INIT(&thread->pp_mutexq); thread->state = PS_RUNNING; _thr_getscheduler(thread->tid, &thread->attr.sched_policy, &sched_param); thread->attr.prio = sched_param.sched_priority; /* Others cleared to zero by thr_alloc() */ } static void init_private(void) { size_t len; int mib[2]; char *env; _thr_umutex_init(&_mutex_static_lock); _thr_umutex_init(&_cond_static_lock); _thr_umutex_init(&_rwlock_static_lock); _thr_umutex_init(&_keytable_lock); _thr_umutex_init(&_thr_atfork_lock); _thr_umutex_init(&_thr_event_lock); _thr_once_init(); _thr_spinlock_init(); _thr_list_init(); /* * Avoid reinitializing some things if they don't need to be, * e.g. after a fork(). 
*/ if (init_once == 0) { /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) PANIC("Cannot get kern.usrstack from sysctl"); len = sizeof(_thr_is_smp); sysctlbyname("kern.smp.cpus", &_thr_is_smp, &len, NULL, 0); _thr_is_smp = (_thr_is_smp > 1); _thr_page_size = getpagesize(); _thr_guard_default = _thr_page_size; _pthread_attr_default.guardsize_attr = _thr_guard_default; _pthread_attr_default.stacksize_attr = _thr_stack_default; env = getenv("LIBPTHREAD_SPINLOOPS"); if (env) _thr_spinloops = atoi(env); env = getenv("LIBPTHREAD_YIELDLOOPS"); if (env) _thr_yieldloops = atoi(env); TAILQ_INIT(&_thr_atfork_list); } init_once = 1; } diff --git a/lib/libthr/thread/thr_join.c b/lib/libthr/thread/thr_join.c index 8201abafeae9..d3c8367dc130 100644 --- a/lib/libthr/thread/thr_join.c +++ b/lib/libthr/thread/thr_join.c @@ -1,150 +1,150 @@ /* * Copyright (c) 2005, David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include "namespace.h" #include #include #include "un-namespace.h" #include "thr_private.h" int _pthread_timedjoin_np(pthread_t pthread, void **thread_return, const struct timespec *abstime); static int join_common(pthread_t, void **, const struct timespec *); __weak_reference(_pthread_join, pthread_join); __weak_reference(_pthread_timedjoin_np, pthread_timedjoin_np); static void backout_join(void *arg) { struct pthread *curthread = _get_curthread(); struct pthread *pthread = (struct pthread *)arg; THREAD_LIST_LOCK(curthread); pthread->joiner = NULL; THREAD_LIST_UNLOCK(curthread); } int _pthread_join(pthread_t pthread, void **thread_return) { return (join_common(pthread, thread_return, NULL)); } int _pthread_timedjoin_np(pthread_t pthread, void **thread_return, const struct timespec *abstime) { if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); return (join_common(pthread, thread_return, abstime)); } /* * Cancellation behavior: * if the thread is canceled, joinee is not recycled. 
*/ static int join_common(pthread_t pthread, void **thread_return, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); struct timespec ts, ts2, *tsp; void *tmp; long tid; int ret = 0; if (pthread == NULL) return (EINVAL); if (pthread == curthread) return (EDEADLK); THREAD_LIST_LOCK(curthread); if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) { ret = ESRCH; } else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) { ret = EINVAL; } else if (pthread->joiner != NULL) { /* Multiple joiners are not supported. */ ret = ENOTSUP; } if (ret) { THREAD_LIST_UNLOCK(curthread); return (ret); } /* Set the running thread to be the joiner: */ pthread->joiner = curthread; THREAD_LIST_UNLOCK(curthread); THR_CLEANUP_PUSH(curthread, backout_join, pthread); - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); tid = pthread->tid; while (pthread->tid != TID_TERMINATED) { _thr_testcancel(curthread); if (abstime != NULL) { clock_gettime(CLOCK_REALTIME, &ts); TIMESPEC_SUB(&ts2, abstime, &ts); if (ts2.tv_sec < 0) { ret = ETIMEDOUT; break; } tsp = &ts2; } else tsp = NULL; ret = _thr_umtx_wait(&pthread->tid, tid, tsp); if (ret == ETIMEDOUT) break; } - _thr_cancel_leave_defer(curthread, 0); + _thr_cancel_leave(curthread, 0); THR_CLEANUP_POP(curthread, 0); if (ret == ETIMEDOUT) { THREAD_LIST_LOCK(curthread); pthread->joiner = NULL; THREAD_LIST_UNLOCK(curthread); } else { ret = 0; tmp = pthread->ret; THREAD_LIST_LOCK(curthread); pthread->tlflags |= TLFLAGS_DETACHED; pthread->joiner = NULL; THR_GCLIST_ADD(pthread); THREAD_LIST_UNLOCK(curthread); if (thread_return != NULL) *thread_return = tmp; } return (ret); } diff --git a/lib/libthr/thread/thr_kern.c b/lib/libthr/thread/thr_kern.c index 649a973aabdc..3ad33ad995a2 100644 --- a/lib/libthr/thread/thr_kern.c +++ b/lib/libthr/thread/thr_kern.c @@ -1,164 +1,132 @@ /* * Copyright (c) 2005 David Xu * Copyright (C) 2003 Daniel M. Eischen * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "thr_private.h" /*#define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* * This is called when the first thread (other than the initial * thread) is created. 
*/ int _thr_setthreaded(int threaded) { if (((threaded == 0) ^ (__isthreaded == 0)) == 0) return (0); __isthreaded = threaded; if (threaded != 0) { _thr_rtld_init(); } else { _thr_rtld_fini(); } return (0); } -void -_thr_signal_block(struct pthread *curthread) -{ - sigset_t set; - - if (curthread->sigblock > 0) { - curthread->sigblock++; - return; - } - SIGFILLSET(set); - SIGDELSET(set, SIGBUS); - SIGDELSET(set, SIGILL); - SIGDELSET(set, SIGFPE); - SIGDELSET(set, SIGSEGV); - SIGDELSET(set, SIGTRAP); - __sys_sigprocmask(SIG_BLOCK, &set, &curthread->sigmask); - curthread->sigblock++; -} - -void -_thr_signal_unblock(struct pthread *curthread) -{ - if (--curthread->sigblock == 0) - __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); -} - -int -_thr_send_sig(struct pthread *thread, int sig) -{ - return thr_kill(thread->tid, sig); -} - void _thr_assert_lock_level() { PANIC("locklevel <= 0"); } int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) { switch(rtp->type) { case RTP_PRIO_REALTIME: *policy = SCHED_RR; param->sched_priority = RTP_PRIO_MAX - rtp->prio; break; case RTP_PRIO_FIFO: *policy = SCHED_FIFO; param->sched_priority = RTP_PRIO_MAX - rtp->prio; break; default: *policy = SCHED_OTHER; param->sched_priority = 0; break; } return (0); } int _schedparam_to_rtp(int policy, const struct sched_param *param, struct rtprio *rtp) { switch(policy) { case SCHED_RR: rtp->type = RTP_PRIO_REALTIME; rtp->prio = RTP_PRIO_MAX - param->sched_priority; break; case SCHED_FIFO: rtp->type = RTP_PRIO_FIFO; rtp->prio = RTP_PRIO_MAX - param->sched_priority; break; case SCHED_OTHER: default: rtp->type = RTP_PRIO_NORMAL; rtp->prio = 0; break; } return (0); } int _thr_getscheduler(lwpid_t lwpid, int *policy, struct sched_param *param) { struct rtprio rtp; int ret; ret = rtprio_thread(RTP_LOOKUP, lwpid, &rtp); if (ret == -1) return (ret); _rtp_to_schedparam(&rtp, policy, param); return (0); } int _thr_setscheduler(lwpid_t lwpid, int policy, const 
struct sched_param *param) { struct rtprio rtp; _schedparam_to_rtp(policy, param, &rtp); return (rtprio_thread(RTP_SET, lwpid, &rtp)); } diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h index f261810e7e99..492d58d20638 100644 --- a/lib/libthr/thread/thr_private.h +++ b/lib/libthr/thread/thr_private.h @@ -1,746 +1,759 @@ /* * Copyright (C) 2005 Daniel M. Eischen * Copyright (c) 2005 David Xu * Copyright (c) 1995-1998 John Birrell . * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SYM_FB10(sym) __CONCAT(sym, _fb10) #define SYM_FBP10(sym) __CONCAT(sym, _fbp10) #define WEAK_REF(sym, alias) __weak_reference(sym, alias) #define SYM_COMPAT(sym, impl, ver) __sym_compat(sym, impl, ver) #define SYM_DEFAULT(sym, impl, ver) __sym_default(sym, impl, ver) #define FB10_COMPAT(func, sym) \ WEAK_REF(func, SYM_FB10(sym)); \ SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0) #define FB10_COMPAT_PRIVATE(func, sym) \ WEAK_REF(func, SYM_FBP10(sym)); \ SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0) #include "pthread_md.h" #include "thr_umtx.h" #include "thread_db.h" typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist; typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head; TAILQ_HEAD(mutex_queue, pthread_mutex); /* Signal to do cancellation */ #define SIGCANCEL 32 /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) 
_thread_printf(STDERR_FILENO, ##args) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT(cond, msg) do { \ if (__predict_false(!(cond))) \ PANIC(msg); \ } while (0) #else #define THR_ASSERT(cond, msg) #endif #ifdef PIC # define STATIC_LIB_REQUIRE(name) #else # define STATIC_LIB_REQUIRE(name) __asm (".globl " #name) #endif #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec >= 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) struct pthread_mutex { /* * Lock for accesses to this structure. */ struct umutex m_lock; enum pthread_mutextype m_type; struct pthread *m_owner; int m_count; int m_refcount; int m_spinloops; int m_yieldloops; /* * Link for all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; }; struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } struct pthread_cond { struct umutex c_lock; struct ucond c_kerncv; int c_pshared; int c_clockid; }; struct pthread_cond_attr { int c_pshared; int c_clockid; }; struct pthread_barrier { struct umutex b_lock; struct ucond b_cv; volatile int64_t b_cycle; volatile int b_count; volatile int b_waiters; }; struct pthread_barrierattr { int pshared; }; struct pthread_spinlock { struct umutex s_lock; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *prev; void (*routine)(void *); void *routine_arg; int onheap; }; #define THR_CLEANUP_PUSH(td, func, arg) { \ struct pthread_cleanup __cup; \ \ __cup.routine = func; \ __cup.routine_arg = arg; \ __cup.onheap = 0; \ __cup.prev = (td)->cleanup; \ (td)->cleanup = &__cup; #define THR_CLEANUP_POP(td, exec) \ (td)->cleanup = __cup.prev; \ if ((exec) != 0) \ __cup.routine(__cup.routine_arg); \ } struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); void (*parent)(void); void (*child)(void); }; struct pthread_attr { int sched_policy; int sched_inherit; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; cpuset_t *cpuset; size_t cpusetsize; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK_DEFAULT (sizeof(void *) / 4 * 1024 * 1024) /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK_INITIAL (THR_STACK_DEFAULT * 2) /* * Define priorities returned by kernel. 
*/ #define THR_MIN_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_min) #define THR_MAX_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_max) #define THR_DEF_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_default) #define THR_MIN_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_min) #define THR_MAX_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_max) #define THR_DEF_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_default) /* XXX The SCHED_FIFO should have same priority range as SCHED_RR */ #define THR_MIN_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO_1].pri_min) #define THR_MAX_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_max) #define THR_DEF_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_default) struct pthread_prio { int pri_min; int pri_max; int pri_default; }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { struct urwlock lock; struct pthread *owner; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_DEAD }; struct pthread_specific_elem { const void *data; int seqno; }; struct pthread_key { volatile int allocated; int seqno; void (*destructor)(void *); }; /* * lwpid_t is 32bit but kernel thr API exports tid as long type * in very earily date. */ #define TID(thread) ((uint32_t) ((thread)->tid)) /* * Thread structure. */ struct pthread { /* Kernel thread id. */ long tid; #define TID_TERMINATED 1 /* * Lock for accesses to this thread structure. */ struct umutex lock; /* Internal condition variable cycle number. */ uint32_t cycle; /* How many low level locks the thread held. */ int locklevel; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* Signal blocked counter. */ int sigblock; /* Queue entry for list of all threads. */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ /* Queue entry for GC lists. */ TAILQ_ENTRY(pthread) gcle; /* Hash queue entry. */ LIST_ENTRY(pthread) hle; /* Threads reference count. 
*/ int refcount; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; #define SHOULD_CANCEL(thr) \ ((thr)->cancel_pending && \ ((thr)->cancel_point || (thr)->cancel_async) && \ (thr)->cancel_enable && (thr)->cancelling == 0) /* Cancellation is enabled */ int cancel_enable; /* Cancellation request is pending */ int cancel_pending; /* Thread is at cancellation point */ int cancel_point; /* Cancellation should be synchoronized */ int cancel_defer; /* Asynchronouse cancellation is enabled */ int cancel_async; /* Cancellation is in progress */ int cancelling; /* Thread temporary signal mask. */ sigset_t sigmask; - /* Thread is in SIGCANCEL handler. */ - int in_sigcancel_handler; - - /* New thread should unblock SIGCANCEL. */ + /* Thread should unblock SIGCANCEL. */ int unblock_sigcancel; + /* In sigsuspend state */ + int in_sigsuspend; + + /* deferred signal info */ + siginfo_t deferred_siginfo; + + /* signal mask to restore. */ + sigset_t deferred_sigmask; + + /* the sigaction should be used for deferred signal. */ + struct sigaction deferred_sigact; + /* Force new thread to exit. */ int force_exit; /* Thread state: */ enum pthread_state state; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */ #define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */ /* Thread list flags; only set with thread list lock held. 
*/ int tlflags; #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ #define TLFLAGS_DETACHED 0x0008 /* thread is detached */ /* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */ struct mutex_queue mutexq; /* Queue of all owned PRIO_PROTECT mutexes. */ struct mutex_queue pp_mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Number rwlocks rdlocks held. */ int rdlock_count; /* * Current locks bitmap for rtld. */ int rtld_bits; /* Thread control block */ struct tcb *tcb; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; /* Enable event reporting */ int report_events; /* Event mask */ int event_mask; /* Event */ td_event_msg_t event_buf; }; #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_CRITICAL_ENTER(thrd) \ (thrd)->critical_count++ #define THR_CRITICAL_LEAVE(thrd) \ do { \ (thrd)->critical_count--; \ _thr_ast(thrd); \ } while (0) #define THR_UMUTEX_TRYLOCK(thrd, lck) \ _thr_umutex_trylock((lck), TID(thrd)) #define THR_UMUTEX_LOCK(thrd, lck) \ _thr_umutex_lock((lck), TID(thrd)) #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) #define THR_UMUTEX_UNLOCK(thrd, lck) \ _thr_umutex_unlock((lck), TID(thrd)) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ (thrd)->locklevel++; \ _thr_umutex_lock(lck, TID(thrd)); \ } while (0) #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT_LOCKLEVEL(thrd) \ do { \ if (__predict_false((thrd)->locklevel <= 0)) \ _thr_assert_lock_level(); \ } while (0) #else #define THR_ASSERT_LOCKLEVEL(thrd) #endif #define THR_LOCK_RELEASE(thrd, lck) \ do { \ THR_ASSERT_LOCKLEVEL(thrd); \ _thr_umutex_unlock((lck), TID(thrd)); 
\ (thrd)->locklevel--; \ _thr_ast(thrd); \ } while (0) #define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock) #define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) #define THREAD_LIST_LOCK(curthrd) \ do { \ THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock); \ } while (0) #define THREAD_LIST_UNLOCK(curthrd) \ do { \ THR_LOCK_RELEASE((curthrd), &_thr_list_lock); \ } while (0) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ _thr_hash_add(thrd); \ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ _thr_hash_remove(thrd); \ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ _gc_count--; \ } \ } while (0) #define GC_NEEDED() (_gc_count >= 5) #define SHOULD_REPORT_EVENT(curthr, e) \ (curthr->report_events && \ (((curthr)->event_mask | _thread_event_mask ) & e) != 0) extern int __isthreaded; /* * Global variables for the pthread kernel. 
*/ extern char *_usrstack __hidden; extern struct pthread *_thr_initial __hidden; /* For debugger */ extern int _libthr_debug; extern int _thread_event_mask; extern struct pthread *_thread_last_event; /* List of all threads: */ extern pthreadlist _thread_list; /* List of threads needing GC: */ extern pthreadlist _thread_gc_list __hidden; extern int _thread_active_threads; extern atfork_head _thr_atfork_list __hidden; extern struct umutex _thr_atfork_lock __hidden; /* Default thread attributes: */ extern struct pthread_attr _pthread_attr_default __hidden; /* Default mutex attributes: */ extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden; /* Default condition variable attributes: */ extern struct pthread_cond_attr _pthread_condattr_default __hidden; extern struct pthread_prio _thr_priorities[] __hidden; extern pid_t _thr_pid __hidden; extern int _thr_is_smp __hidden; extern size_t _thr_guard_default __hidden; extern size_t _thr_stack_default __hidden; extern size_t _thr_stack_initial __hidden; extern int _thr_page_size __hidden; extern int _thr_spinloops __hidden; extern int _thr_yieldloops __hidden; /* Garbage thread count. */ extern int _gc_count __hidden; extern struct umutex _mutex_static_lock __hidden; extern struct umutex _cond_static_lock __hidden; extern struct umutex _rwlock_static_lock __hidden; extern struct umutex _keytable_lock __hidden; extern struct umutex _thr_list_lock __hidden; extern struct umutex _thr_event_lock __hidden; /* * Function prototype definitions. 
*/ __BEGIN_DECLS int _thr_setthreaded(int) __hidden; int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden; int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; void _libpthread_init(struct pthread *) __hidden; struct pthread *_thr_alloc(struct pthread *) __hidden; void _thread_exit(const char *, int, const char *) __hidden __dead2; int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden; void _thr_ref_delete(struct pthread *, struct pthread *) __hidden; void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden; int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden; void _thr_rtld_init(void) __hidden; void _thr_rtld_fini(void) __hidden; +void _thr_rtld_postfork_child(void) __hidden; int _thr_stack_alloc(struct pthread_attr *) __hidden; void _thr_stack_free(struct pthread_attr *) __hidden; void _thr_free(struct pthread *, struct pthread *) __hidden; void _thr_gc(struct pthread *) __hidden; void _thread_cleanupspecific(void) __hidden; void _thread_printf(int, const char *, ...) 
__hidden; void _thr_spinlock_init(void) __hidden; void _thr_cancel_enter(struct pthread *) __hidden; -void _thr_cancel_leave(struct pthread *) __hidden; -void _thr_cancel_leave2(struct pthread *, int) __hidden; -void _thr_cancel_enter_defer(struct pthread *, int) __hidden; -void _thr_cancel_leave_defer(struct pthread *, int) __hidden; +void _thr_cancel_enter2(struct pthread *, int) __hidden; +void _thr_cancel_leave(struct pthread *, int) __hidden; void _thr_testcancel(struct pthread *) __hidden; void _thr_signal_block(struct pthread *) __hidden; void _thr_signal_unblock(struct pthread *) __hidden; void _thr_signal_init(void) __hidden; void _thr_signal_deinit(void) __hidden; int _thr_send_sig(struct pthread *, int sig) __hidden; void _thr_list_init(void) __hidden; void _thr_hash_add(struct pthread *) __hidden; void _thr_hash_remove(struct pthread *) __hidden; struct pthread *_thr_hash_find(struct pthread *) __hidden; void _thr_link(struct pthread *, struct pthread *) __hidden; void _thr_unlink(struct pthread *, struct pthread *) __hidden; -void _thr_suspend_check(struct pthread *) __hidden; void _thr_assert_lock_level(void) __hidden __dead2; void _thr_ast(struct pthread *) __hidden; void _thr_once_init(void) __hidden; void _thr_report_creation(struct pthread *curthread, struct pthread *newthread) __hidden; void _thr_report_death(struct pthread *curthread) __hidden; int _thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden; int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden; +void _thr_signal_prefork(void) __hidden; +void _thr_signal_postfork(void) __hidden; +void _thr_signal_postfork_child(void) __hidden; int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, struct rtprio *rtp) __hidden; void _thread_bp_create(void); void _thread_bp_death(void); int _sched_yield(void); void _pthread_cleanup_push(void (*)(void *), void *); 
void _pthread_cleanup_pop(int); +void _pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden; + /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); int __sys_openat(int, const char *, int, ...); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); -int __sys_sigreturn(ucontext_t *); +int __sys_sigreturn(const ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigwait(const sigset_t *, int *); int __sys_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *); int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _SYS_UCONTEXT_H_ int __sys_setcontext(const ucontext_t *ucp); int __sys_swapcontext(ucontext_t *oucp, const ucontext_t *ucp); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_fork(void); pid_t __sys_getpid(void); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden; static inline int _thr_isthreaded(void) { return (__isthreaded != 0); } static inline int _thr_is_inited(void) { return (_thr_initial != NULL); } static inline void _thr_check_init(void) { if (_thr_initial == NULL) _libpthread_init(NULL); } struct dl_phdr_info; void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info); void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden; +void _thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden; __END_DECLS #endif /* !_THR_PRIVATE_H */ diff --git a/lib/libthr/thread/thr_rtld.c b/lib/libthr/thread/thr_rtld.c index 
d9041adcfb90..e6af7029c76d 100644 --- a/lib/libthr/thread/thr_rtld.c +++ b/lib/libthr/thread/thr_rtld.c @@ -1,220 +1,221 @@ /* * Copyright (c) 2006, David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ /* * A lockless rwlock for rtld. 
*/ #include #include +#include #include "rtld_lock.h" #include "thr_private.h" #undef errno extern int errno; static int _thr_rtld_clr_flag(int); static void *_thr_rtld_lock_create(void); static void _thr_rtld_lock_destroy(void *); static void _thr_rtld_lock_release(void *); static void _thr_rtld_rlock_acquire(void *); static int _thr_rtld_set_flag(int); static void _thr_rtld_wlock_acquire(void *); struct rtld_lock { struct urwlock lock; char _pad[CACHE_LINE_SIZE - sizeof(struct urwlock)]; }; static struct rtld_lock lock_place[MAX_RTLD_LOCKS] __aligned(CACHE_LINE_SIZE); static int busy_places; static void * _thr_rtld_lock_create(void) { int locki; struct rtld_lock *l; static const char fail[] = "_thr_rtld_lock_create failed\n"; for (locki = 0; locki < MAX_RTLD_LOCKS; locki++) { if ((busy_places & (1 << locki)) == 0) break; } if (locki == MAX_RTLD_LOCKS) { write(2, fail, sizeof(fail) - 1); return (NULL); } busy_places |= (1 << locki); l = &lock_place[locki]; l->lock.rw_flags = URWLOCK_PREFER_READER; return (l); } static void _thr_rtld_lock_destroy(void *lock) { int locki; size_t i; locki = (struct rtld_lock *)lock - &lock_place[0]; for (i = 0; i < sizeof(struct rtld_lock); ++i) ((char *)lock)[i] = 0; busy_places &= ~(1 << locki); } #define SAVE_ERRNO() { \ if (curthread != _thr_initial) \ errsave = curthread->error; \ else \ errsave = errno; \ } #define RESTORE_ERRNO() { \ if (curthread != _thr_initial) \ curthread->error = errsave; \ else \ errno = errsave; \ } static void _thr_rtld_rlock_acquire(void *lock) { struct pthread *curthread; struct rtld_lock *l; int errsave; curthread = _get_curthread(); SAVE_ERRNO(); l = (struct rtld_lock *)lock; THR_CRITICAL_ENTER(curthread); while (_thr_rwlock_rdlock(&l->lock, 0, NULL) != 0) ; curthread->rdlock_count++; RESTORE_ERRNO(); } static void _thr_rtld_wlock_acquire(void *lock) { struct pthread *curthread; struct rtld_lock *l; int errsave; curthread = _get_curthread(); SAVE_ERRNO(); l = (struct rtld_lock *)lock; - 
_thr_signal_block(curthread); + THR_CRITICAL_ENTER(curthread); while (_thr_rwlock_wrlock(&l->lock, NULL) != 0) ; RESTORE_ERRNO(); } static void _thr_rtld_lock_release(void *lock) { struct pthread *curthread; struct rtld_lock *l; int32_t state; int errsave; curthread = _get_curthread(); SAVE_ERRNO(); l = (struct rtld_lock *)lock; state = l->lock.rw_state; if (_thr_rwlock_unlock(&l->lock) == 0) { - if ((state & URWLOCK_WRITE_OWNER) == 0) { + if ((state & URWLOCK_WRITE_OWNER) == 0) curthread->rdlock_count--; - THR_CRITICAL_LEAVE(curthread); - } else { - _thr_signal_unblock(curthread); - } + THR_CRITICAL_LEAVE(curthread); } RESTORE_ERRNO(); } static int _thr_rtld_set_flag(int mask __unused) { /* * The caller's code in rtld-elf is broken, it is not signal safe, * just return zero to fool it. */ return (0); } static int _thr_rtld_clr_flag(int mask __unused) { return (0); } void _thr_rtld_init(void) { struct RtldLockInfo li; struct pthread *curthread; long dummy = -1; curthread = _get_curthread(); /* force to resolve _umtx_op PLT */ _umtx_op_err((struct umtx *)&dummy, UMTX_OP_WAKE, 1, 0, 0); /* force to resolve errno() PLT */ __error(); + /* force to resolve memcpy PLT */ + memcpy(&dummy, &dummy, sizeof(dummy)); + li.lock_create = _thr_rtld_lock_create; li.lock_destroy = _thr_rtld_lock_destroy; li.rlock_acquire = _thr_rtld_rlock_acquire; li.wlock_acquire = _thr_rtld_wlock_acquire; li.lock_release = _thr_rtld_lock_release; li.thread_set_flag = _thr_rtld_set_flag; li.thread_clr_flag = _thr_rtld_clr_flag; li.at_fork = NULL; /* mask signals, also force to resolve __sys_sigprocmask PLT */ _thr_signal_block(curthread); _rtld_thread_init(&li); _thr_signal_unblock(curthread); } void _thr_rtld_fini(void) { struct pthread *curthread; curthread = _get_curthread(); _thr_signal_block(curthread); _rtld_thread_init(NULL); _thr_signal_unblock(curthread); } diff --git a/lib/libthr/thread/thr_sig.c b/lib/libthr/thread/thr_sig.c index 312c15f4a3bf..382ff4fbbab7 100644 --- 
a/lib/libthr/thread/thr_sig.c +++ b/lib/libthr/thread/thr_sig.c @@ -1,422 +1,742 @@ /* * Copyright (c) 2005, David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include -#include -#include #include #include #include "un-namespace.h" +#include "libc_private.h" #include "thr_private.h" /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif -extern int __pause(void); +struct usigaction { + struct sigaction sigact; + struct urwlock lock; +}; + +static struct usigaction _thr_sigact[_SIG_MAXSIG]; + +static void thr_sighandler(int, siginfo_t *, void *); +static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *); +static void check_deferred_signal(struct pthread *); +static void check_suspend(struct pthread *); +static void check_cancel(struct pthread *curthread, ucontext_t *ucp); + int ___pause(void); int _raise(int); int __sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout); int _sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout); int __sigwaitinfo(const sigset_t *set, siginfo_t *info); int _sigwaitinfo(const sigset_t *set, siginfo_t *info); int __sigwait(const sigset_t *set, int *sig); int _sigwait(const sigset_t *set, int *sig); int __sigsuspend(const sigset_t *sigmask); +int _sigaction(int, const struct sigaction *, struct sigaction *); int _setcontext(const ucontext_t *); int _swapcontext(ucontext_t *, const ucontext_t *); -static void +static const sigset_t _thr_deferset={{ + 0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)| + _SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)), + 0xffffffff, + 0xffffffff, + 0xffffffff}}; + +static const sigset_t _thr_maskset={{ + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff}}; + +void +_thr_signal_block(struct pthread *curthread) +{ + + if (curthread->sigblock > 0) { + curthread->sigblock++; + return; + } + __sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask); + curthread->sigblock++; +} + +void +_thr_signal_unblock(struct pthread *curthread) +{ + if (--curthread->sigblock == 0) + __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); +} + +int +_thr_send_sig(struct pthread *thread, int sig) +{ + return thr_kill(thread->tid, sig); +} + +static inline void remove_thr_signals(sigset_t *set) { if (SIGISMEMBER(*set, SIGCANCEL)) 
SIGDELSET(*set, SIGCANCEL); } static const sigset_t * thr_remove_thr_signals(const sigset_t *set, sigset_t *newset) { - const sigset_t *pset; - - if (SIGISMEMBER(*set, SIGCANCEL)) { - *newset = *set; - SIGDELSET(*newset, SIGCANCEL); - pset = newset; - } else - pset = set; - return (pset); + *newset = *set; + remove_thr_signals(newset); + return (newset); } static void sigcancel_handler(int sig __unused, - siginfo_t *info __unused, ucontext_t *ucp __unused) + siginfo_t *info __unused, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); + int err; - curthread->in_sigcancel_handler++; - _thr_ast(curthread); - curthread->in_sigcancel_handler--; + if (THR_IN_CRITICAL(curthread)) + return; + err = errno; + check_suspend(curthread); + check_cancel(curthread, ucp); + errno = err; +} + +typedef void (*ohandler)(int sig, int code, + struct sigcontext *scp, char *addr, __sighandler_t *catcher); + +/* + * The signal handler wrapper is entered with all signal masked. + */ +static void +thr_sighandler(int sig, siginfo_t *info, void *_ucp) +{ + struct pthread *curthread = _get_curthread(); + ucontext_t *ucp = _ucp; + struct sigaction act; + int err; + + err = errno; + _thr_rwl_rdlock(&_thr_sigact[sig-1].lock); + act = _thr_sigact[sig-1].sigact; + _thr_rwl_unlock(&_thr_sigact[sig-1].lock); + errno = err; + + /* + * if a thread is in critical region, for example it holds low level locks, + * try to defer the signal processing, however if the signal is synchronous + * signal, it means a bad thing has happened, this is a programming error, + * resuming fault point can not help anything (normally causes deadloop), + * so here we let user code handle it immediately. + */ + if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) { + memcpy(&curthread->deferred_sigact, &act, sizeof(struct sigaction)); + memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t)); + curthread->deferred_sigmask = ucp->uc_sigmask; + /* mask all signals, we will restore it later. 
*/ + ucp->uc_sigmask = _thr_deferset; + return; + } + + handle_signal(&act, sig, info, ucp); +} + +static void +handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp) +{ + struct pthread *curthread = _get_curthread(); + ucontext_t uc2; + __siginfohandler_t *sigfunc; + int cancel_defer; + int cancel_point; + int cancel_async; + int cancel_enable; + int in_sigsuspend; + int err; + + /* add previous level mask */ + SIGSETOR(actp->sa_mask, ucp->uc_sigmask); + + /* add this signal's mask */ + if (!(actp->sa_flags & SA_NODEFER)) + SIGADDSET(actp->sa_mask, sig); + + in_sigsuspend = curthread->in_sigsuspend; + curthread->in_sigsuspend = 0; + + /* + * if thread is in deferred cancellation mode, disable cancellation + * in signal handler. + * if user signal handler calls a cancellation point function, e.g, + * it calls write() to write data to file, because write() is a + * cancellation point, the thread is immediately cancelled if + * cancellation is pending, to avoid this problem while thread is in + * deferring mode, cancellation is temporarily disabled. + */ + cancel_defer = curthread->cancel_defer; + cancel_point = curthread->cancel_point; + cancel_async = curthread->cancel_async; + cancel_enable = curthread->cancel_enable; + curthread->cancel_point = 0; + curthread->cancel_defer = 0; + if (!cancel_async) + curthread->cancel_enable = 0; + + /* restore correct mask before calling user handler */ + __sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL); + + sigfunc = actp->sa_sigaction; + + /* + * We have already reset cancellation point flags, so if user's code + * longjmp()s out of its signal handler, wish its jmpbuf was set + * outside of a cancellation point, in most cases, this would be + * true. however, ther is no way to save cancel_enable in jmpbuf, + * so after setjmps() returns once more, the user code may need to + * re-set cancel_enable flag by calling pthread_setcancelstate(). 
+ */ + if ((actp->sa_flags & SA_SIGINFO) != 0) + (*(sigfunc))(sig, info, ucp); + else { + ((ohandler)(*sigfunc))( + sig, info->si_code, (struct sigcontext *)ucp, + info->si_addr, (__sighandler_t *)sigfunc); + } + err = errno; + + curthread->in_sigsuspend = in_sigsuspend; + curthread->cancel_defer = cancel_defer; + curthread->cancel_point = cancel_point; + curthread->cancel_enable = cancel_enable; + + memcpy(&uc2, ucp, sizeof(uc2)); + SIGDELSET(uc2.uc_sigmask, SIGCANCEL); + + /* reschedule cancellation */ + check_cancel(curthread, &uc2); + errno = err; + __sys_sigreturn(&uc2); } void _thr_ast(struct pthread *curthread) { - if (THR_IN_CRITICAL(curthread)) + if (!THR_IN_CRITICAL(curthread)) { + check_deferred_signal(curthread); + check_suspend(curthread); + check_cancel(curthread, NULL); + } +} + +/* reschedule cancellation */ +static void +check_cancel(struct pthread *curthread, ucontext_t *ucp) +{ + + if (__predict_true(!curthread->cancel_pending || !curthread->cancel_enable || + curthread->cancelling)) return; - if (curthread->cancel_pending && curthread->cancel_enable - && !curthread->cancelling) { - if (curthread->cancel_async) { - /* - * asynchronous cancellation mode, act upon - * immediately. - */ - _pthread_exit(PTHREAD_CANCELED); - } else { - /* - * Otherwise, we are in defer mode, and we are at - * cancel point, tell kernel to not block the current - * thread on next cancelable system call. - * - * There are two cases we should call thr_wake() to - * turn on TDP_WAKEUP in kernel: - * 1) we are going to call a cancelable system call, - * non-zero cancel_point means we are already in - * cancelable state, next system call is cancelable. - * 2) because _thr_ast() may be called by - * THR_CRITICAL_LEAVE() which is used by rtld rwlock - * and any libthr internal locks, when rtld rwlock - * is used, it is mostly caused my an unresolved PLT. 
- * those routines may clear the TDP_WAKEUP flag by - * invoking some system calls, in those cases, we - * also should reenable the flag. - */ - if (curthread->cancel_point) { - if (curthread->cancel_defer) - thr_wake(curthread->tid); - else - _pthread_exit(PTHREAD_CANCELED); - } + if (curthread->cancel_async) { + /* + * asynchronous cancellation mode, act upon + * immediately. + */ + _pthread_exit_mask(PTHREAD_CANCELED, + ucp? &ucp->uc_sigmask : NULL); + } else { + /* + * Otherwise, we are in defer mode, and we are at + * cancel point, tell kernel to not block the current + * thread on next cancelable system call. + * + * There are three cases we should call thr_wake() to + * turn on TDP_WAKEUP or send SIGCANCEL in kernel: + * 1) we are going to call a cancelable system call, + * non-zero cancel_point means we are already in + * cancelable state, next system call is cancelable. + * 2) because _thr_ast() may be called by + * THR_CRITICAL_LEAVE() which is used by rtld rwlock + * and any libthr internal locks, when rtld rwlock + * is used, it is mostly caused my an unresolved PLT. + * those routines may clear the TDP_WAKEUP flag by + * invoking some system calls, in those cases, we + * also should reenable the flag. + * 3) thread is in sigsuspend(), and the syscall insists + * on getting a signal before it agrees to return. 
+ */ + if (curthread->cancel_point) { + if (curthread->in_sigsuspend && ucp) { + SIGADDSET(ucp->uc_sigmask, SIGCANCEL); + curthread->unblock_sigcancel = 1; + _thr_send_sig(curthread, SIGCANCEL); + } else + thr_wake(curthread->tid); } } +} + +static void +check_deferred_signal(struct pthread *curthread) +{ + ucontext_t uc; + struct sigaction act; + siginfo_t info; + volatile int first; - if (__predict_false((curthread->flags & - (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED)) - == THR_FLAGS_NEED_SUSPEND)) - _thr_suspend_check(curthread); + if (__predict_true(curthread->deferred_siginfo.si_signo == 0)) + return; + first = 1; + getcontext(&uc); + if (first) { + first = 0; + act = curthread->deferred_sigact; + uc.uc_sigmask = curthread->deferred_sigmask; + memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t)); + /* remove signal */ + curthread->deferred_siginfo.si_signo = 0; + if (act.sa_flags & SA_RESETHAND) { + struct sigaction tact; + + tact = act; + tact.sa_handler = SIG_DFL; + _sigaction(info.si_signo, &tact, NULL); + } + handle_signal(&act, info.si_signo, &info, &uc); + } } -void -_thr_suspend_check(struct pthread *curthread) +static void +check_suspend(struct pthread *curthread) { uint32_t cycle; - int err; + + if (__predict_true((curthread->flags & + (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED)) + != THR_FLAGS_NEED_SUSPEND)) + return; if (curthread->force_exit) return; - err = errno; /* * Blocks SIGCANCEL which other threads must send. */ _thr_signal_block(curthread); /* * Increase critical_count, here we don't use THR_LOCK/UNLOCK * because we are leaf code, we don't want to recursively call * ourself. */ curthread->critical_count++; THR_UMUTEX_LOCK(curthread, &(curthread)->lock); while ((curthread->flags & (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED)) == THR_FLAGS_NEED_SUSPEND) { curthread->cycle++; cycle = curthread->cycle; /* Wake the thread suspending us. 
*/ _thr_umtx_wake(&curthread->cycle, INT_MAX, 0); /* * if we are from pthread_exit, we don't want to * suspend, just go and die. */ if (curthread->state == PS_DEAD) break; curthread->flags |= THR_FLAGS_SUSPENDED; THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock); _thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0); THR_UMUTEX_LOCK(curthread, &(curthread)->lock); curthread->flags &= ~THR_FLAGS_SUSPENDED; } THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock); curthread->critical_count--; - /* - * Unblocks SIGCANCEL, it is possible a new SIGCANCEL is ready and - * a new signal frame will nest us, this seems a problem because - * stack will grow and overflow, but because kernel will automatically - * mask the SIGCANCEL when delivering the signal, so we at most only - * have one nesting signal frame, this should be fine. - */ _thr_signal_unblock(curthread); - errno = err; } void _thr_signal_init(void) { struct sigaction act; - /* Install cancel handler. */ - SIGEMPTYSET(act.sa_mask); - act.sa_flags = SA_SIGINFO | SA_RESTART; + /* Install SIGCANCEL handler. */ + SIGFILLSET(act.sa_mask); + act.sa_flags = SA_SIGINFO; act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler; __sys_sigaction(SIGCANCEL, &act, NULL); + + /* Unblock SIGCANCEL */ + SIGEMPTYSET(act.sa_mask); + SIGADDSET(act.sa_mask, SIGCANCEL); + __sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL); +} + +/* + * called from rtld with rtld_lock locked, because rtld_lock is + * a critical region, so all signals have already beeen masked. 
+ */ +void +_thr_sigact_unload(struct dl_phdr_info *phdr_info) +{ + struct urwlock *rwlp; + struct sigaction *actp; + struct sigaction kact; + void (*handler)(int); + int sig; + + for (sig = 1; sig < _SIG_MAXSIG; sig++) { + actp = &_thr_sigact[sig].sigact; +retry: + handler = actp->sa_handler; + if (handler != SIG_DFL && handler != SIG_IGN && + __elf_phdr_match_addr(phdr_info, handler)) { + rwlp = &_thr_sigact[sig].lock; + _thr_rwl_wrlock(rwlp); + if (handler != actp->sa_handler) { + _thr_rwl_unlock(rwlp); + goto retry; + } + actp->sa_handler = SIG_DFL; + actp->sa_flags = SA_SIGINFO; + SIGEMPTYSET(actp->sa_mask); + if (__sys_sigaction(sig, NULL, &kact) == 0 && + kact.sa_handler != SIG_DFL && + kact.sa_handler != SIG_IGN) + __sys_sigaction(sig, actp, NULL); + _thr_rwl_unlock(rwlp); + } + } +} + +void +_thr_signal_prefork(void) +{ + int i; + + for (i = 1; i < _SIG_MAXSIG; ++i) + _thr_rwl_rdlock(&_thr_sigact[i-1].lock); +} + +void +_thr_signal_postfork(void) +{ + int i; + + for (i = 1; i < _SIG_MAXSIG; ++i) + _thr_rwl_unlock(&_thr_sigact[i-1].lock); +} + +void +_thr_signal_postfork_child(void) +{ + int i; + + for (i = 1; i < _SIG_MAXSIG; ++i) + bzero(&_thr_sigact[i-1].lock, sizeof(struct urwlock)); } void _thr_signal_deinit(void) { } __weak_reference(___pause, pause); int ___pause(void) { - struct pthread *curthread = _get_curthread(); - int ret; + sigset_t oset; - _thr_cancel_enter(curthread); - ret = __pause(); - _thr_cancel_leave(curthread); - - return ret; + if (_sigprocmask(SIG_BLOCK, NULL, &oset) == -1) + return (-1); + return (__sigsuspend(&oset)); } __weak_reference(_raise, raise); int _raise(int sig) { - int ret; - - if (!_thr_isthreaded()) - ret = kill(getpid(), sig); - else - ret = _thr_send_sig(_get_curthread(), sig); - return (ret); + return _thr_send_sig(_get_curthread(), sig); } __weak_reference(_sigaction, sigaction); int _sigaction(int sig, const struct sigaction * act, struct sigaction * oact) { - /* Check if the signal number is out of range: */ + 
struct sigaction newact, oldact, oldact2; + sigset_t oldset; + int ret = 0, err = 0; + if (!_SIG_VALID(sig) || sig == SIGCANCEL) { - /* Return an invalid argument: */ errno = EINVAL; return (-1); } - return __sys_sigaction(sig, act, oact); + if (act) + newact = *act; + + __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset); + _thr_rwl_wrlock(&_thr_sigact[sig-1].lock); + + if (act != NULL) { + oldact2 = _thr_sigact[sig-1].sigact; + + /* + * if a new sig handler is SIG_DFL or SIG_IGN, + * don't remove old handler from _thr_sigact[], + * so deferred signals still can use the handlers, + * multiple threads invoking sigaction itself is + * a race condition, so it is not a problem. + */ + if (newact.sa_handler != SIG_DFL && + newact.sa_handler != SIG_IGN) { + _thr_sigact[sig-1].sigact = newact; + remove_thr_signals( + &_thr_sigact[sig-1].sigact.sa_mask); + newact.sa_flags &= ~SA_NODEFER; + newact.sa_flags |= SA_SIGINFO; + newact.sa_sigaction = thr_sighandler; + newact.sa_mask = _thr_maskset; /* mask all signals */ + } + if ((ret = __sys_sigaction(sig, &newact, &oldact))) { + err = errno; + _thr_sigact[sig-1].sigact = oldact2; + } + } else if (oact != NULL) { + ret = __sys_sigaction(sig, NULL, &oldact); + err = errno; + } + + if (oldact.sa_handler != SIG_DFL && + oldact.sa_handler != SIG_IGN) { + oldact = _thr_sigact[sig-1].sigact; + } + + _thr_rwl_unlock(&_thr_sigact[sig-1].lock); + __sys_sigprocmask(SIG_SETMASK, &oldset, NULL); + + if (ret == 0) { + if (oact != NULL) + *oact = oldact; + } else { + errno = err; + } + return (ret); } __weak_reference(_sigprocmask, sigprocmask); int _sigprocmask(int how, const sigset_t *set, sigset_t *oset) { const sigset_t *p = set; sigset_t newset; if (how != SIG_UNBLOCK) { if (set != NULL) { newset = *set; SIGDELSET(newset, SIGCANCEL); p = &newset; } } return (__sys_sigprocmask(how, p, oset)); } __weak_reference(_pthread_sigmask, pthread_sigmask); int _pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) { if 
(_sigprocmask(how, set, oset)) return (errno); return (0); } __weak_reference(__sigsuspend, sigsuspend); int _sigsuspend(const sigset_t * set) { sigset_t newset; return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset))); } int __sigsuspend(const sigset_t * set) { - struct pthread *curthread = _get_curthread(); + struct pthread *curthread; sigset_t newset; - int ret; + int ret, old; + curthread = _get_curthread(); + + old = curthread->in_sigsuspend; + curthread->in_sigsuspend = 1; _thr_cancel_enter(curthread); ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset)); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); + curthread->in_sigsuspend = old; + if (curthread->unblock_sigcancel) { + curthread->unblock_sigcancel = 0; + SIGEMPTYSET(newset); + SIGADDSET(newset, SIGCANCEL); + __sys_sigprocmask(SIG_UNBLOCK, &newset, NULL); + } return (ret); } __weak_reference(__sigwait, sigwait); __weak_reference(__sigtimedwait, sigtimedwait); __weak_reference(__sigwaitinfo, sigwaitinfo); int _sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout) { sigset_t newset; return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info, timeout)); } /* * Cancellation behavior: * Thread may be canceled at start, if thread got signal, * it is not canceled. */ int __sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * timeout) { struct pthread *curthread = _get_curthread(); sigset_t newset; int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info, timeout); - _thr_cancel_leave_defer(curthread, (ret == -1)); + _thr_cancel_leave(curthread, (ret == -1)); return (ret); } int _sigwaitinfo(const sigset_t *set, siginfo_t *info) { sigset_t newset; return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info)); } /* * Cancellation behavior: * Thread may be canceled at start, if thread got signal, * it is not canceled. 
*/ int __sigwaitinfo(const sigset_t *set, siginfo_t *info) { struct pthread *curthread = _get_curthread(); sigset_t newset; int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } int _sigwait(const sigset_t *set, int *sig) { sigset_t newset; return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig)); } /* * Cancellation behavior: * Thread may be canceled at start, if thread got signal, * it is not canceled. */ int __sigwait(const sigset_t *set, int *sig) { struct pthread *curthread = _get_curthread(); sigset_t newset; int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig); - _thr_cancel_leave_defer(curthread, (ret != 0)); + _thr_cancel_leave(curthread, (ret != 0)); return (ret); } __weak_reference(_setcontext, setcontext); int _setcontext(const ucontext_t *ucp) { ucontext_t uc; - (void) memcpy(&uc, ucp, sizeof (uc)); + (void) memcpy(&uc, ucp, sizeof(uc)); remove_thr_signals(&uc.uc_sigmask); - return __sys_setcontext(&uc); } __weak_reference(_swapcontext, swapcontext); int _swapcontext(ucontext_t *oucp, const ucontext_t *ucp) { ucontext_t uc; - (void) memcpy(&uc, ucp, sizeof (uc)); + (void) memcpy(&uc, ucp, sizeof(uc)); remove_thr_signals(&uc.uc_sigmask); return __sys_swapcontext(oucp, &uc); } diff --git a/lib/libthr/thread/thr_syscalls.c b/lib/libthr/thread/thr_syscalls.c index 73fa56fcc8f3..2327d7484e6e 100644 --- a/lib/libthr/thread/thr_syscalls.c +++ b/lib/libthr/thread/thr_syscalls.c @@ -1,778 +1,763 @@ /* * Copyright (C) 2005 David Xu . * Copyright (c) 2003 Daniel Eischen . * Copyright (C) 2000 Jason Evans . * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "thr_private.h" extern int __creat(const char *, mode_t); extern int __pselect(int, fd_set *, fd_set *, fd_set *, const struct timespec *, const sigset_t *); extern unsigned __sleep(unsigned int); extern int __system(const char *); extern int __tcdrain(int); extern int __usleep(useconds_t); extern pid_t __wait(int *); extern pid_t __waitpid(pid_t, int *, int); extern int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); extern int __sys_accept(int, struct sockaddr *, socklen_t *); extern int __sys_connect(int, const struct sockaddr *, socklen_t); extern int __sys_fsync(int); extern int __sys_msync(void *, size_t, int); extern int __sys_pselect(int, fd_set *, fd_set *, fd_set *, const struct timespec *, const sigset_t *); extern int 
__sys_poll(struct pollfd *, unsigned, int); extern ssize_t __sys_recv(int, void *, size_t, int); extern ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *); extern ssize_t __sys_recvmsg(int, struct msghdr *, int); extern int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); extern int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); extern ssize_t __sys_sendmsg(int, const struct msghdr *, int); extern ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t); extern ssize_t __sys_readv(int, const struct iovec *, int); extern pid_t __sys_wait4(pid_t, int *, int, struct rusage *); extern ssize_t __sys_writev(int, const struct iovec *, int); int ___creat(const char *, mode_t); int ___pselect(int, fd_set *, fd_set *, fd_set *, const struct timespec *, const sigset_t *); unsigned ___sleep(unsigned); int ___system(const char *); int ___tcdrain(int); int ___usleep(useconds_t useconds); pid_t ___wait(int *); pid_t ___waitpid(pid_t, int *, int); int __accept(int, struct sockaddr *, socklen_t *); int __aio_suspend(const struct aiocb * const iocbs[], int, const struct timespec *); int __close(int); int __connect(int, const struct sockaddr *, socklen_t); int __fcntl(int, int,...); #ifdef SYSCALL_COMPAT extern int __fcntl_compat(int, int,...); #endif int __fsync(int); int __msync(void *, size_t, int); int __nanosleep(const struct timespec *, struct timespec *); int __open(const char *, int,...); int __openat(int, const char *, int,...); int __poll(struct pollfd *, unsigned int, int); ssize_t __read(int, void *buf, size_t); ssize_t __readv(int, const struct iovec *, int); ssize_t __recvfrom(int, void *, size_t, int f, struct sockaddr *, socklen_t *); ssize_t __recvmsg(int, struct msghdr *, int); int __select(int, fd_set *, fd_set *, fd_set *, struct timeval *); ssize_t __sendmsg(int, const struct msghdr *, int); ssize_t __sendto(int, const void *, size_t, int, const struct 
sockaddr *, socklen_t); pid_t __wait3(int *, int, struct rusage *); pid_t __wait4(pid_t, int *, int, struct rusage *); ssize_t __write(int, const void *, size_t); ssize_t __writev(int, const struct iovec *, int); __weak_reference(__accept, accept); /* * Cancellation behavior: * If thread is canceled, no socket is created. */ int __accept(int s, struct sockaddr *addr, socklen_t *addrlen) { struct pthread *curthread; int ret; curthread = _get_curthread(); - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_accept(s, addr, addrlen); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(__aio_suspend, aio_suspend); int __aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct timespec *timeout) { struct pthread *curthread = _get_curthread(); int ret; _thr_cancel_enter(curthread); ret = __sys_aio_suspend(iocbs, niocb, timeout); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(__close, close); /* * Cancellation behavior: * According to manual of close(), the file descriptor is always deleted. * Here, thread is only canceled after the system call, so the file * descriptor is always deleted despite whether the thread is canceled * or not. */ int __close(int fd) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 0); + _thr_cancel_enter2(curthread, 0); ret = __sys_close(fd); - _thr_cancel_leave_defer(curthread, 1); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(__connect, connect); /* * Cancellation behavior: * If the thread is canceled, connection is not made. 
*/ int __connect(int fd, const struct sockaddr *name, socklen_t namelen) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 0); + _thr_cancel_enter(curthread); ret = __sys_connect(fd, name, namelen); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(___creat, creat); /* * Cancellation behavior: * If thread is canceled, file is not created. */ int ___creat(const char *path, mode_t mode) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __creat(path, mode); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__fcntl, fcntl); /* * Cancellation behavior: * According to specification, only F_SETLKW is a cancellation point. * Thread is only canceled at start, or canceled if the system call * is failure, this means the function does not generate side effect * if it is canceled. */ int __fcntl(int fd, int cmd,...) 
{ struct pthread *curthread = _get_curthread(); int ret; va_list ap; va_start(ap, cmd); - switch (cmd) { - case F_DUPFD: - case F_DUP2FD: - ret = __sys_fcntl(fd, cmd, va_arg(ap, int)); - break; - case F_SETFD: - case F_SETFL: - ret = __sys_fcntl(fd, cmd, va_arg(ap, int)); - break; - case F_GETFD: - case F_GETFL: - ret = __sys_fcntl(fd, cmd); - break; - case F_OSETLKW: - case F_SETLKW: - _thr_cancel_enter_defer(curthread, 1); + if (cmd == F_OSETLKW || cmd == F_SETLKW) { + _thr_cancel_enter(curthread); #ifdef SYSCALL_COMPAT ret = __fcntl_compat(fd, cmd, va_arg(ap, void *)); #else ret = __sys_fcntl(fd, cmd, va_arg(ap, void *)); #endif - _thr_cancel_leave_defer(curthread, ret == -1); - break; - default: + _thr_cancel_leave(curthread, ret == -1); + } else { #ifdef SYSCALL_COMPAT ret = __fcntl_compat(fd, cmd, va_arg(ap, void *)); #else ret = __sys_fcntl(fd, cmd, va_arg(ap, void *)); #endif } va_end(ap); return (ret); } __weak_reference(__fsync, fsync); /* * Cancellation behavior: * Thread may be canceled after system call. */ int __fsync(int fd) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 0); + _thr_cancel_enter2(curthread, 0); ret = __sys_fsync(fd); - _thr_cancel_leave_defer(curthread, 1); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(__msync, msync); /* * Cancellation behavior: * Thread may be canceled after system call. 
*/ int __msync(void *addr, size_t len, int flags) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 0); + _thr_cancel_enter2(curthread, 0); ret = __sys_msync(addr, len, flags); - _thr_cancel_leave_defer(curthread, 1); + _thr_cancel_leave(curthread, 1); return ret; } __weak_reference(__nanosleep, nanosleep); int __nanosleep(const struct timespec *time_to_sleep, struct timespec *time_remaining) { struct pthread *curthread = _get_curthread(); int ret; _thr_cancel_enter(curthread); ret = __sys_nanosleep(time_to_sleep, time_remaining); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(__open, open); /* * Cancellation behavior: * If the thread is canceled, file is not opened. */ int __open(const char *path, int flags,...) { struct pthread *curthread = _get_curthread(); int ret; int mode = 0; va_list ap; /* Check if the file is being created: */ if (flags & O_CREAT) { /* Get the creation mode: */ va_start(ap, flags); mode = va_arg(ap, int); va_end(ap); } - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_open(path, flags, mode); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__openat, openat); /* * Cancellation behavior: * If the thread is canceled, file is not opened. */ int __openat(int fd, const char *path, int flags, ...) 
{ struct pthread *curthread = _get_curthread(); int ret; int mode = 0; va_list ap; /* Check if the file is being created: */ if (flags & O_CREAT) { /* Get the creation mode: */ va_start(ap, flags); mode = va_arg(ap, int); va_end(ap); } - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_openat(fd, path, flags, mode); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__poll, poll); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns something, * the thread is not canceled. */ int __poll(struct pollfd *fds, unsigned int nfds, int timeout) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_poll(fds, nfds, timeout); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(___pselect, pselect); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns something, * the thread is not canceled. */ int ___pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds, const struct timespec *timo, const sigset_t *mask) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_pselect(count, rfds, wfds, efds, timo, mask); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(__read, read); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call got some data, * the thread is not canceled. 
*/ ssize_t __read(int fd, void *buf, size_t nbytes) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_read(fd, buf, nbytes); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__readv, readv); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call got some data, * the thread is not canceled. */ ssize_t __readv(int fd, const struct iovec *iov, int iovcnt) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_readv(fd, iov, iovcnt); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__recvfrom, recvfrom); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call got some data, * the thread is not canceled. */ ssize_t __recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from, socklen_t *fl) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_recvfrom(s, b, l, f, from, fl); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(__recvmsg, recvmsg); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call got some data, * the thread is not canceled. 
*/ ssize_t __recvmsg(int s, struct msghdr *m, int f) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_recvmsg(s, m, f); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(__select, select); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns something, * the thread is not canceled. */ int __select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return ret; } __weak_reference(__sendmsg, sendmsg); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call sent * data, the thread is not canceled. */ ssize_t __sendmsg(int s, const struct msghdr *m, int f) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_sendmsg(s, m, f); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return (ret); } __weak_reference(__sendto, sendto); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call sent some * data, the thread is not canceled. 
*/ ssize_t __sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t, socklen_t tl) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_sendto(s, m, l, f, t, tl); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return (ret); } __weak_reference(___sleep, sleep); unsigned int ___sleep(unsigned int seconds) { struct pthread *curthread = _get_curthread(); unsigned int ret; _thr_cancel_enter(curthread); ret = __sleep(seconds); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(___system, system); int ___system(const char *string) { struct pthread *curthread = _get_curthread(); int ret; _thr_cancel_enter(curthread); ret = __system(string); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); return ret; } __weak_reference(___tcdrain, tcdrain); /* * Cancellation behavior: * If thread is canceled, the system call is not completed, * this means not all bytes were drained. */ int ___tcdrain(int fd) { struct pthread *curthread = _get_curthread(); int ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __tcdrain(fd); - _thr_cancel_leave_defer(curthread, ret == -1); + _thr_cancel_leave(curthread, ret == -1); return (ret); } __weak_reference(___usleep, usleep); int ___usleep(useconds_t useconds) { struct pthread *curthread = _get_curthread(); int ret; _thr_cancel_enter(curthread); ret = __usleep(useconds); - _thr_cancel_leave(curthread); + _thr_cancel_leave(curthread, 1); return (ret); } __weak_reference(___wait, wait); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns * a child pid, the thread is not canceled. 
*/ pid_t ___wait(int *istat) { struct pthread *curthread = _get_curthread(); pid_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __wait(istat); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return ret; } __weak_reference(__wait3, wait3); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns * a child pid, the thread is not canceled. */ pid_t __wait3(int *status, int options, struct rusage *rusage) { struct pthread *curthread = _get_curthread(); pid_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = _wait4(WAIT_ANY, status, options, rusage); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return (ret); } __weak_reference(__wait4, wait4); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns * a child pid, the thread is not canceled. */ pid_t __wait4(pid_t pid, int *status, int options, struct rusage *rusage) { struct pthread *curthread = _get_curthread(); pid_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_wait4(pid, status, options, rusage); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return ret; } __weak_reference(___waitpid, waitpid); /* * Cancellation behavior: * Thread may be canceled at start, but if the system call returns * a child pid, the thread is not canceled. 
*/ pid_t ___waitpid(pid_t wpid, int *status, int options) { struct pthread *curthread = _get_curthread(); pid_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __waitpid(wpid, status, options); - _thr_cancel_leave_defer(curthread, ret <= 0); + _thr_cancel_leave(curthread, ret <= 0); return ret; } __weak_reference(__write, write); /* * Cancellation behavior: * Thread may be canceled at start, but if the thread wrote some data, * it is not canceled. */ ssize_t __write(int fd, const void *buf, size_t nbytes) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_write(fd, buf, nbytes); - _thr_cancel_leave_defer(curthread, (ret <= 0)); + _thr_cancel_leave(curthread, (ret <= 0)); return ret; } __weak_reference(__writev, writev); /* * Cancellation behavior: * Thread may be canceled at start, but if the thread wrote some data, * it is not canceled. */ ssize_t __writev(int fd, const struct iovec *iov, int iovcnt) { struct pthread *curthread = _get_curthread(); ssize_t ret; - _thr_cancel_enter_defer(curthread, 1); + _thr_cancel_enter(curthread); ret = __sys_writev(fd, iov, iovcnt); - _thr_cancel_leave_defer(curthread, (ret <= 0)); + _thr_cancel_leave(curthread, (ret <= 0)); return ret; } diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c index 5af923dce4fb..b712b7af880b 100644 --- a/lib/libthr/thread/thr_umtx.c +++ b/lib/libthr/thread/thr_umtx.c @@ -1,219 +1,258 @@ /* * Copyright (c) 2005 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include "thr_private.h" #include "thr_umtx.h" #ifndef HAS__UMTX_OP_ERR int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2) { if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1) return (errno); return (0); } #endif void _thr_umutex_init(struct umutex *mtx) { static struct umutex default_mtx = DEFAULT_UMUTEX; *mtx = default_mtx; } int __thr_umutex_lock(struct umutex *mtx, uint32_t id) { uint32_t owner; if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { for (;;) { /* wait in kernel */ _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); owner = mtx->m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0 && atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) return (0); } } return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); } int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *ets) { struct timespec timo, cts; uint32_t owner; int ret; clock_gettime(CLOCK_REALTIME, &cts); TIMESPEC_SUB(&timo, ets, &cts); if (timo.tv_sec < 0) return (ETIMEDOUT); for (;;) { if ((mtx->m_flags 
& (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { /* wait in kernel */ ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo); /* now try to lock it */ owner = mtx->m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0 && atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) return (0); } else { ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo); if (ret == 0) break; } if (ret == ETIMEDOUT) break; clock_gettime(CLOCK_REALTIME, &cts); TIMESPEC_SUB(&timo, ets, &cts); if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) { ret = ETIMEDOUT; break; } } return (ret); } int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) { #ifndef __ia64__ /* XXX this logic has a race-condition on ia64. */ if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED); return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0); } #endif /* __ia64__ */ return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0); } int __thr_umutex_trylock(struct umutex *mtx) { return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0); } int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) { return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0); } int _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) return (ETIMEDOUT); return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, __DECONST(void*, timeout)); } int _thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) return (ETIMEDOUT); return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? 
UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, __DECONST(void*, timeout)); } int _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared) { return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0); } void _thr_ucond_init(struct ucond *cv) { bzero(cv, sizeof(struct ucond)); } int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int check_unparking) { if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) { struct pthread *curthread = _get_curthread(); _thr_umutex_unlock(m, TID(curthread)); return (ETIMEDOUT); } return _umtx_op_err(cv, UMTX_OP_CV_WAIT, check_unparking ? UMTX_CHECK_UNPARKING : 0, m, __DECONST(void*, timeout)); } int _thr_ucond_signal(struct ucond *cv) { if (!cv->c_has_waiters) return (0); return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL); } int _thr_ucond_broadcast(struct ucond *cv) { if (!cv->c_has_waiters) return (0); return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL); } int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) { return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp); } int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) { return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp); } int __thr_rwlock_unlock(struct urwlock *rwlock) { return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL); } + +void +_thr_rwl_rdlock(struct urwlock *rwlock) +{ + int ret; + + for (;;) { + if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0) + return; + ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL); + if (ret == 0) + return; + if (ret != EINTR) + PANIC("rdlock error"); + } +} + +void +_thr_rwl_wrlock(struct urwlock *rwlock) +{ + int ret; + + for (;;) { + if (_thr_rwlock_trywrlock(rwlock) == 0) + return; + ret = __thr_rwlock_wrlock(rwlock, NULL); + if (ret == 0) + return; + if (ret != EINTR) + PANIC("wrlock error"); + } 
+} + +void +_thr_rwl_unlock(struct urwlock *rwlock) +{ + if (_thr_rwlock_unlock(rwlock)) + PANIC("unlock error"); +} diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h index a6e462e29ca5..20489849f53f 100644 --- a/lib/libthr/thread/thr_umtx.h +++ b/lib/libthr/thread/thr_umtx.h @@ -1,188 +1,193 @@ /*- * Copyright (c) 2005 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _THR_FBSD_UMTX_H_ #define _THR_FBSD_UMTX_H_ #include #include #define DEFAULT_UMUTEX {0,0, {0,0},{0,0,0,0}} int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden; int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *timeout) __hidden; int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden; int __thr_umutex_trylock(struct umutex *mtx) __hidden; int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) __hidden; void _thr_umutex_init(struct umutex *mtx) __hidden; int _thr_umtx_wait(volatile long *mtx, long exp, const struct timespec *timeout) __hidden; int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp, const struct timespec *timeout, int shared) __hidden; int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden; int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int check_unpaking) __hidden; void _thr_ucond_init(struct ucond *cv) __hidden; int _thr_ucond_signal(struct ucond *cv) __hidden; int _thr_ucond_broadcast(struct ucond *cv) __hidden; int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) __hidden; int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden; int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden; +/* Internal use only */ +void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden; +void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden; +void _thr_rwl_unlock(struct urwlock *rwlock) __hidden; + static inline int _thr_umutex_trylock(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) return (0); if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0) return (EBUSY); return (__thr_umutex_trylock(mtx)); } static inline int _thr_umutex_trylock2(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0) return (0); if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
__predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0)) if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED)) return (0); return (EBUSY); } static inline int _thr_umutex_lock(struct umutex *mtx, uint32_t id) { if (_thr_umutex_trylock2(mtx, id) == 0) return (0); return (__thr_umutex_lock(mtx, id)); } static inline int _thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *timeout) { if (_thr_umutex_trylock2(mtx, id) == 0) return (0); return (__thr_umutex_timedlock(mtx, id, timeout)); } static inline int _thr_umutex_unlock(struct umutex *mtx, uint32_t id) { if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED)) return (0); return (__thr_umutex_unlock(mtx, id)); } static inline int _thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags) { int32_t state; int32_t wrflags; if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER) wrflags = URWLOCK_WRITE_OWNER; else wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS; state = rwlock->rw_state; while (!(state & wrflags)) { if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) return (EAGAIN); if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1)) return (0); state = rwlock->rw_state; } return (EBUSY); } static inline int _thr_rwlock_trywrlock(struct urwlock *rwlock) { int32_t state; state = rwlock->rw_state; while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) { if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER)) return (0); state = rwlock->rw_state; } return (EBUSY); } static inline int _thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) { if (_thr_rwlock_tryrdlock(rwlock, flags) == 0) return (0); return (__thr_rwlock_rdlock(rwlock, flags, tsp)); } static inline int _thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) { if (_thr_rwlock_trywrlock(rwlock) == 0) return (0); return 
(__thr_rwlock_wrlock(rwlock, tsp)); } static inline int _thr_rwlock_unlock(struct urwlock *rwlock) { int32_t state; state = rwlock->rw_state; if (state & URWLOCK_WRITE_OWNER) { if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0)) return (0); } else { for (;;) { if (__predict_false(URWLOCK_READER_COUNT(state) == 0)) return (EPERM); if (!((state & (URWLOCK_WRITE_WAITERS | URWLOCK_READ_WAITERS)) && URWLOCK_READER_COUNT(state) == 1)) { if (atomic_cmpset_rel_32(&rwlock->rw_state, state, state-1)) return (0); state = rwlock->rw_state; } else { break; } } } return (__thr_rwlock_unlock(rwlock)); } #endif