Index: head/lib/libc_r/uthread/pthread_private.h =================================================================== --- head/lib/libc_r/uthread/pthread_private.h (revision 72373) +++ head/lib/libc_r/uthread/pthread_private.h (revision 72374) @@ -1,1401 +1,1400 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. */ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (unsigned long)(ra) + 8UL; \ (jb)[0]._jb[R_RA + 4] = 0; \ 
(jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 /* * Additional state for a thread suspended with pthread_suspend_np(). */ enum pthread_susp { SUSP_NO, /* Not suspended. */ SUSP_YES, /* Suspended. */ SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. 
*/ SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ SUSP_COND_WAIT /* Suspended, still in a condition queue. */ }; /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of red zone at the end of each stack. In actuality, this "red zone" is * merely an unmapped region, except in the case of the initial stack. Since * mmap() makes it possible to specify the maximum growth of a MAP_STACK region, * an unmapped gap between thread stacks achieves the same effect as explicitly * mapped red zones. */ #define PTHREAD_STACK_GUARD PAGE_SIZE /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 /* Size of the scheduler stack: */ #define SCHED_STACK_SIZE PAGE_SIZE /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_key { spinlock_t lock; volatile int allocated; volatile int count; void (*destructor) (); }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. 
*/ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); struct pthread_signal_frame; struct pthread_state_data { struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; enum pthread_state psd_state; int psd_flags; int psd_interrupted; int psd_longjmp_val; int psd_sigmask_seqno; int psd_signo; int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; /* * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(), * but they may also be sigjmp_buf and ucontext_t. When a thread is * interrupted by a signal, it's context is saved as a ucontext_t. An * application is also free to use [_]longjmp()/[_]siglongjmp() to jump * between contexts within the same thread. Future support will also * include setcontext()/getcontext(). 
* * Define an enumerated type that can identify the 4 different context * types. */ typedef enum { CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */ CTX_JB, /* context is jmp_buf (with saved sigset) */ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */ CTX_UC /* context is ucontext_t (with saved sigset) */ } thread_context_t; /* * There are 2 basic contexts that a frame may contain at any * one time: * * o ctx - The context that the thread should return to after normal * completion of the signal handler. * o sig_jb - The context just before the signal handler is invoked. * Attempts at abnormal returns from user supplied signal handlers * will return back to the signal context to perform any necessary * cleanup. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; /* * Threads return context; ctxtype identifies the type of context. * For signal frame 0, these point to the context storage area * within the pthread structure. When handling signals (frame > 0), * these point to a context storage area that is allocated off the * threads stack. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ ucontext_t uc; siginfo_t siginfo; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. 
*/ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* * Threads return context; ctxtype identifies the type of context. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; /* * Used for tracking delivery of signal handlers. */ struct pthread_signal_frame *curframe; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; enum pthread_susp suspended; thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* Join queue head and link for waiting threads: */ TAILQ_HEAD(join_head, pthread) join_queue; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). 
It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for another thread to terminate * (the join queue above) * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex, condition variable, or join queue when it is * not being called from a signal handler. If a thread is a * member of one of these queues when a signal handler is invoked, * it must remain in the queue. For this reason, the links for * these queues must not be (re)used for other queues. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. */ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. 
*/ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ #define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */ #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. 
*/ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; const void **specific_data; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* Spare thread stack. */ struct stack { SLIST_ENTRY(stack) qe; /* Queue entry for this stack. */ }; /* * Global variables for the uthread kernel. */ /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). */ SCLASS struct pthread * volatile _thread_single #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. */ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. 
*/ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. */ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. 
*/ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; SCLASS sigset_t _process_sigmask #ifdef GLOBAL_PTHREAD_PRIVATE = { {0, 0, 0, 0} } #endif ; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. */ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* The signal stack. */ SCLASS struct sigaltstack _thread_sigstack; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Spare stack queue. Stacks of default size are cached in order to reduce * thread creation time. Spare stacks are used in LIFO order to increase cache * locality. 
*/ SCLASS SLIST_HEAD(, stack) _stackq; /* * Base address of next unallocated default-size {stack, red zone}. Stacks are * allocated contiguously, starting below the bottom of the main stack. When a * new stack is created, a red zone is created (actually, the red zone is simply * left unmapped) below the bottom of the stack, such that the stack will not be * able to grow all the way to the top of the next stack. This isn't * fool-proof. It is possible for a stack to grow by a large amount, such that * it grows into the next stack, and as long as the memory within the red zone * is never accessed, nothing will prevent one thread stack from trouncing all * over the next. */ SCLASS void * _next_stack #ifdef GLOBAL_PTHREAD_PRIVATE /* main stack top - main stack size - stack size - (red zone + main stack red zone) */ = (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD) #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); -void _flockfile_backout(pthread_t); -void _funlock_owned(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); +pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); 
void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct 
sigaltstack *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); int __sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #inclde */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, int *); int __sys_bind(int, const struct sockaddr *, int); int __sys_connect(int, const struct sockaddr *, int); int __sys_getpeername(int, struct sockaddr *, int *); int __sys_getsockname(int, struct sockaddr *, int *); int __sys_getsockopt(int, int, int, void *, int *); int __sys_listen(int, int); int __sys_setsockopt(int, int, int, const void *, int); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t __sys_recvmsg(int, struct msghdr *, int); ssize_t __sys_send(int, const void *, size_t, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fchown(int, uid_t, gid_t); int __sys_fork(void); int __sys_fsync(int); int __sys_pipe(int *); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); long __sys_fpathconf(int, int); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef 
_SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ Index: head/lib/libc_r/uthread/uthread_exit.c =================================================================== --- head/lib/libc_r/uthread/uthread_exit.c (revision 72373) +++ head/lib/libc_r/uthread/uthread_exit.c (revision 72374) @@ -1,238 +1,231 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) #pragma weak pthread_exit=_pthread_exit void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ - 
strcpy(s, "Fatal error '"); - strcat(s, string); - strcat(s, "' at line ? "); - strcat(s, "in file "); - strcat(s, fname); - strcat(s, " (errno = ?"); - strcat(s, ")\n"); + snprintf(s, sizeof(s), + "Fatal error '%s' at line %d in file %s (errno = %d)\n", + string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. */ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); - - /* Unlock all owned file locks: */ - _funlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. 
POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. */ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. 
*/ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(curthread->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&curthread->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* * Set the return value for the woken thread: */ if ((curthread->attr.flags & PTHREAD_DETACHED) != 0) pthread->error = ESRCH; else { pthread->ret = curthread->ret; pthread->error = 0; } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. */ PANIC("Dead thread has resumed"); } Index: head/lib/libc_r/uthread/uthread_fd.c =================================================================== --- head/lib/libc_r/uthread/uthread_fd.c (revision 72373) +++ head/lib/libc_r/uthread/uthread_fd.c (revision 72374) @@ -1,993 +1,1033 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include "pthread_private.h" #define FDQ_INSERT(q,p) \ do { \ TAILQ_INSERT_TAIL(q,p,qe); \ p->flags |= PTHREAD_FLAGS_IN_FDQ; \ } while (0) #define FDQ_REMOVE(q,p) \ do { \ if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \ TAILQ_REMOVE(q,p,qe); \ p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \ } \ } while (0) /* Static variables: */ static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER; /* Prototypes: */ +#ifdef _FDLOCKS_ENABLED static inline pthread_t fd_next_reader(int fd); static inline pthread_t fd_next_writer(int fd); +#endif /* * This function *must* return -1 and set the thread specific errno * as a system call. This is because the error return from this * function is propagated directly back from thread-wrapped system * calls. 
*/ int _thread_fd_table_init(int fd) { int ret = 0; struct fd_table_entry *entry; int saved_errno; if (_thread_initial == NULL) _thread_init(); /* Check if the file descriptor is out of range: */ if (fd < 0 || fd >= _thread_dtablesize) { /* Return a bad file descriptor error: */ errno = EBADF; ret = -1; } /* * Check if memory has already been allocated for this file * descriptor: */ else if (_thread_fd_table[fd] != NULL) { /* Memory has already been allocated. */ /* Allocate memory for the file descriptor table entry: */ } else if ((entry = (struct fd_table_entry *) malloc(sizeof(struct fd_table_entry))) == NULL) { /* Return an insufficient memory error: */ errno = ENOMEM; ret = -1; } else { /* Initialise the file locks: */ memset(&entry->lock, 0, sizeof(entry->lock)); entry->r_owner = NULL; entry->w_owner = NULL; entry->r_fname = NULL; entry->w_fname = NULL; entry->r_lineno = 0; entry->w_lineno = 0; entry->r_lockcount = 0; entry->w_lockcount = 0; /* Initialise the read/write queues: */ TAILQ_INIT(&entry->r_queue); TAILQ_INIT(&entry->w_queue); /* Get the flags for the file: */ if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) && (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) { ret = -1; } else { /* Check if a stdio descriptor: */ if ((fd < 3) && (_pthread_stdio_flags[fd] != -1)) /* * Use the stdio flags read by * _pthread_init() to avoid * mistaking the non-blocking * flag that, when set on one * stdio fd, is set on all stdio * fds. */ entry->flags = _pthread_stdio_flags[fd]; /* * Make the file descriptor non-blocking. * This might fail if the device driver does * not support non-blocking calls, or if the * driver is naturally non-blocking. */ saved_errno = errno; __sys_fcntl(fd, F_SETFL, entry->flags | O_NONBLOCK); errno = saved_errno; /* Lock the file descriptor table: */ _SPINLOCK(&fd_table_lock); /* * Check if another thread allocated the * file descriptor entry while this thread * was doing the same thing. 
The table wasn't * kept locked during this operation because * it has the potential to recurse. */ if (_thread_fd_table[fd] == NULL) { /* This thread wins: */ _thread_fd_table[fd] = entry; entry = NULL; } /* Unlock the file descriptor table: */ _SPINUNLOCK(&fd_table_lock); } /* * Check if another thread initialised the table entry * before this one could: */ if (entry != NULL) /* * Throw away the table entry that this thread * prepared. The other thread wins. */ free(entry); } /* Return the completion status: */ return (ret); } +#ifdef _FDLOCKS_ENABLED void _thread_fd_unlock(int fd, int lock_type) { struct pthread *curthread = _get_curthread(); int ret; /* * Check that the file descriptor table is initialised for this * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _SPINLOCK(&_thread_fd_table[fd]->lock); /* Check if the running thread owns the read lock: */ if (_thread_fd_table[fd]->r_owner == curthread) { /* Check the file descriptor and lock types: */ if (lock_type == FD_READ || lock_type == FD_RDWR) { /* * Decrement the read lock count for the * running thread: */ _thread_fd_table[fd]->r_lockcount--; /* * Check if the running thread still has read * locks on this file descriptor: */ if (_thread_fd_table[fd]->r_lockcount != 0) { } /* * Get the next thread in the queue for a * read lock on this file descriptor: */ else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) { } else { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, _thread_fd_table[fd]->r_owner); /* * Set the state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING); /* * Reset the number of read locks. 
* This will be incremented by the * new owner of the lock when it sees * that it has the lock. */ _thread_fd_table[fd]->r_lockcount = 0; } } } /* Check if the running thread owns the write lock: */ if (_thread_fd_table[fd]->w_owner == curthread) { /* Check the file descriptor and lock types: */ if (lock_type == FD_WRITE || lock_type == FD_RDWR) { /* * Decrement the write lock count for the * running thread: */ _thread_fd_table[fd]->w_lockcount--; /* * Check if the running thread still has * write locks on this file descriptor: */ if (_thread_fd_table[fd]->w_lockcount != 0) { } /* * Get the next thread in the queue for a * write lock on this file descriptor: */ else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) { } else { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, _thread_fd_table[fd]->w_owner); /* * Set the state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING); /* * Reset the number of write locks. * This will be incremented by the * new owner of the lock when it * sees that it has the lock. 
*/ _thread_fd_table[fd]->w_lockcount = 0; } } } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } int _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) { struct pthread *curthread = _get_curthread(); int ret; /* * Check that the file descriptor table is initialised for this * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { /* Clear the interrupted flag: */ curthread->interrupted = 0; /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _SPINLOCK(&_thread_fd_table[fd]->lock); /* Check the file descriptor and lock types: */ if (lock_type == FD_READ || lock_type == FD_RDWR) { /* * Wait for the file descriptor to be locked * for read for the current thread: */ while ((_thread_fd_table[fd]->r_owner != curthread) && (curthread->interrupted == 0)) { /* * Check if the file descriptor is locked by * another thread: */ if (_thread_fd_table[fd]->r_owner != NULL) { /* * Another thread has locked the file * descriptor for read, so join the * queue of threads waiting for a * read lock on this file descriptor: */ FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread); /* * Save the file descriptor details * in the thread structure for the * running thread: */ curthread->data.fd.fd = fd; /* Set the timeout: */ _thread_kern_set_timeout(timeout); /* * Unlock the file descriptor * table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Schedule this thread to wait on * the read lock. It will only be * woken when it becomes the next in * the queue and is granted access * to the lock by the thread * that is unlocking the file * descriptor. 
*/ _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__); /* * Lock the file descriptor * table entry again: */ _SPINLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, curthread); } } else { /* * The running thread now owns the * read lock on this file descriptor: */ _thread_fd_table[fd]->r_owner = curthread; /* * Reset the number of read locks for * this file descriptor: */ _thread_fd_table[fd]->r_lockcount = 0; } } if (_thread_fd_table[fd]->r_owner == curthread) /* Increment the read lock count: */ _thread_fd_table[fd]->r_lockcount++; } /* Check the file descriptor and lock types: */ if (curthread->interrupted == 0 && (lock_type == FD_WRITE || lock_type == FD_RDWR)) { /* * Wait for the file descriptor to be locked * for write for the current thread: */ while ((_thread_fd_table[fd]->w_owner != curthread) && (curthread->interrupted == 0)) { /* * Check if the file descriptor is locked by * another thread: */ if (_thread_fd_table[fd]->w_owner != NULL) { /* * Another thread has locked the file * descriptor for write, so join the * queue of threads waiting for a * write lock on this file * descriptor: */ FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread); /* * Save the file descriptor details * in the thread structure for the * running thread: */ curthread->data.fd.fd = fd; /* Set the timeout: */ _thread_kern_set_timeout(timeout); /* * Unlock the file descriptor * table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Schedule this thread to wait on * the write lock. It will only be * woken when it becomes the next in * the queue and is granted access to * the lock by the thread that is * unlocking the file descriptor. 
*/ _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__); /* * Lock the file descriptor * table entry again: */ _SPINLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, curthread); } } else { /* * The running thread now owns the * write lock on this file * descriptor: */ _thread_fd_table[fd]->w_owner = curthread; /* * Reset the number of write locks * for this file descriptor: */ _thread_fd_table[fd]->w_lockcount = 0; } } if (_thread_fd_table[fd]->w_owner == curthread) /* Increment the write lock count: */ _thread_fd_table[fd]->w_lockcount++; } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { ret = -1; errno = EINTR; if (curthread->continuation != NULL) curthread->continuation((void *)curthread); } } /* Return the completion status: */ return (ret); } void _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno) { struct pthread *curthread = _get_curthread(); int ret; /* * Check that the file descriptor table is initialised for this * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno); /* Check if the running thread owns the read lock: */ if (_thread_fd_table[fd]->r_owner == curthread) { /* Check the file descriptor and lock types: */ if (lock_type == FD_READ || lock_type == FD_RDWR) { /* * Decrement the read lock count for the * running thread: */ _thread_fd_table[fd]->r_lockcount--; /* * Check if the running thread still has read * locks on this file descriptor: */ if (_thread_fd_table[fd]->r_lockcount != 0) { } /* * Get the next thread in the queue for a * read lock on this file descriptor: */ else if 
((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) { } else { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, _thread_fd_table[fd]->r_owner); /* * Set the state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING); /* * Reset the number of read locks. * This will be incremented by the * new owner of the lock when it sees * that it has the lock. */ _thread_fd_table[fd]->r_lockcount = 0; } } } /* Check if the running thread owns the write lock: */ if (_thread_fd_table[fd]->w_owner == curthread) { /* Check the file descriptor and lock types: */ if (lock_type == FD_WRITE || lock_type == FD_RDWR) { /* * Decrement the write lock count for the * running thread: */ _thread_fd_table[fd]->w_lockcount--; /* * Check if the running thread still has * write locks on this file descriptor: */ if (_thread_fd_table[fd]->w_lockcount != 0) { } /* * Get the next thread in the queue for a * write lock on this file descriptor: */ else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) { } else { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, _thread_fd_table[fd]->w_owner); /* * Set the state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING); /* * Reset the number of write locks. * This will be incremented by the * new owner of the lock when it * sees that it has the lock. */ _thread_fd_table[fd]->w_lockcount = 0; } } } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Undefer and handle pending signals, yielding if * necessary. 
*/ _thread_kern_sig_undefer(); } } int _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, char *fname, int lineno) { struct pthread *curthread = _get_curthread(); int ret; /* * Check that the file descriptor table is initialised for this * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { /* Clear the interrupted flag: */ curthread->interrupted = 0; /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno); /* Check the file descriptor and lock types: */ if (lock_type == FD_READ || lock_type == FD_RDWR) { /* * Wait for the file descriptor to be locked * for read for the current thread: */ while ((_thread_fd_table[fd]->r_owner != curthread) && (curthread->interrupted == 0)) { /* * Check if the file descriptor is locked by * another thread: */ if (_thread_fd_table[fd]->r_owner != NULL) { /* * Another thread has locked the file * descriptor for read, so join the * queue of threads waiting for a * read lock on this file descriptor: */ FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread); /* * Save the file descriptor details * in the thread structure for the * running thread: */ curthread->data.fd.fd = fd; curthread->data.fd.branch = lineno; curthread->data.fd.fname = fname; /* Set the timeout: */ _thread_kern_set_timeout(timeout); /* * Unlock the file descriptor * table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Schedule this thread to wait on * the read lock. It will only be * woken when it becomes the next in * the queue and is granted access * to the lock by the thread * that is unlocking the file * descriptor. 
*/ _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__); /* * Lock the file descriptor * table entry again: */ _SPINLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, curthread); } } else { /* * The running thread now owns the * read lock on this file descriptor: */ _thread_fd_table[fd]->r_owner = curthread; /* * Reset the number of read locks for * this file descriptor: */ _thread_fd_table[fd]->r_lockcount = 0; /* * Save the source file details for * debugging: */ _thread_fd_table[fd]->r_fname = fname; _thread_fd_table[fd]->r_lineno = lineno; } } if (_thread_fd_table[fd]->r_owner == curthread) /* Increment the read lock count: */ _thread_fd_table[fd]->r_lockcount++; } /* Check the file descriptor and lock types: */ if (curthread->interrupted == 0 && (lock_type == FD_WRITE || lock_type == FD_RDWR)) { /* * Wait for the file descriptor to be locked * for write for the current thread: */ while ((_thread_fd_table[fd]->w_owner != curthread) && (curthread->interrupted == 0)) { /* * Check if the file descriptor is locked by * another thread: */ if (_thread_fd_table[fd]->w_owner != NULL) { /* * Another thread has locked the file * descriptor for write, so join the * queue of threads waiting for a * write lock on this file * descriptor: */ FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread); /* * Save the file descriptor details * in the thread structure for the * running thread: */ curthread->data.fd.fd = fd; curthread->data.fd.branch = lineno; curthread->data.fd.fname = fname; /* Set the timeout: */ _thread_kern_set_timeout(timeout); /* * Unlock the file descriptor * table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Schedule this thread to wait on * the write lock. It will only be * woken when it becomes the next in * the queue and is granted access to * the lock by the thread that is * unlocking the file descriptor. 
*/ _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__); /* * Lock the file descriptor * table entry again: */ _SPINLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, curthread); } } else { /* * The running thread now owns the * write lock on this file * descriptor: */ _thread_fd_table[fd]->w_owner = curthread; /* * Reset the number of write locks * for this file descriptor: */ _thread_fd_table[fd]->w_lockcount = 0; /* * Save the source file details for * debugging: */ _thread_fd_table[fd]->w_fname = fname; _thread_fd_table[fd]->w_lineno = lineno; } } if (_thread_fd_table[fd]->w_owner == curthread) /* Increment the write lock count: */ _thread_fd_table[fd]->w_lockcount++; } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); if (curthread->interrupted != 0) { ret = -1; errno = EINTR; if (curthread->continuation != NULL) curthread->continuation((void *)curthread); } } /* Return the completion status: */ return (ret); } void _thread_fd_unlock_owned(pthread_t pthread) { int fd; for (fd = 0; fd < _thread_dtablesize; fd++) { if ((_thread_fd_table[fd] != NULL) && ((_thread_fd_table[fd]->r_owner == pthread) || (_thread_fd_table[fd]->w_owner == pthread))) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _SPINLOCK(&_thread_fd_table[fd]->lock); /* Check if the thread owns the read lock: */ if (_thread_fd_table[fd]->r_owner == pthread) { /* Clear the read lock count: */ _thread_fd_table[fd]->r_lockcount = 0; /* * Get the next thread in the queue for a * read lock on this file descriptor: */ if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, _thread_fd_table[fd]->r_owner); /* * Set the 
state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING); } } /* Check if the thread owns the write lock: */ if (_thread_fd_table[fd]->w_owner == pthread) { /* Clear the write lock count: */ _thread_fd_table[fd]->w_lockcount = 0; /* * Get the next thread in the queue for a * write lock on this file descriptor: */ if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) { /* Remove this thread from the queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, _thread_fd_table[fd]->w_owner); /* * Set the state of the new owner of * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING); } } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); /* * Undefer and handle pending signals, yielding if * necessary. */ _thread_kern_sig_undefer(); } } } void _fd_lock_backout(pthread_t pthread) { int fd; /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); switch (pthread->state) { case PS_FDLR_WAIT: fd = pthread->data.fd.fd; /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current * thread's accesses: */ _SPINLOCK(&_thread_fd_table[fd]->lock); /* Remove the thread from the waiting queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread); break; case PS_FDLW_WAIT: fd = pthread->data.fd.fd; /* * Lock the file descriptor table entry to prevent * other threads from clashing with the current * thread's accesses: */ _SPINLOCK(&_thread_fd_table[fd]->lock); /* Remove the thread from the waiting queue: */ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread); break; default: break; } /* * Undefer and handle pending signals, yielding if * necessary. 
*/ _thread_kern_sig_undefer(); } static inline pthread_t fd_next_reader(int fd) { pthread_t pthread; while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) && (pthread->interrupted != 0)) { /* * This thread has either been interrupted by a signal or * it has been canceled. Remove it from the queue. */ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread); } return (pthread); } static inline pthread_t fd_next_writer(int fd) { pthread_t pthread; while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) && (pthread->interrupted != 0)) { /* * This thread has either been interrupted by a signal or * it has been canceled. Remove it from the queue. */ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread); } return (pthread); } + +#else + +void +_thread_fd_unlock(int fd, int lock_type) +{ +} + +int +_thread_fd_lock(int fd, int lock_type, struct timespec * timeout) +{ + return (0); +} + +void +_thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno) +{ +} + +int +_thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, + char *fname, int lineno) +{ + return (0); +} + +void +_thread_fd_unlock_owned(pthread_t pthread) +{ +} + +void +_fd_lock_backout(pthread_t pthread) +{ +} + +#endif Index: head/lib/libc_r/uthread/uthread_file.c =================================================================== --- head/lib/libc_r/uthread/uthread_file.c (revision 72373) +++ head/lib/libc_r/uthread/uthread_file.c (revision 72374) @@ -1,508 +1,54 @@ /* * Copyright (c) 1995 John Birrell . + * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * * POSIX stdio FILE locking functions. These assume that the locking * is only required at FILE structure level, not at file descriptor * level too. * */ #include -#include -#include -#include #include #include "pthread_private.h" -/* - * Weak symbols for externally visible functions in this file: - */ -#pragma weak flockfile=_flockfile -#pragma weak ftrylockfile=_ftrylockfile -#pragma weak funlockfile=_funlockfile +extern void _flockfile(FILE *); -/* - * The FILE lock structure. The FILE *fp is locked if the owner is - * not NULL. If not locked, the file lock structure can be - * reassigned to a different file by setting fp. 
- */ -struct file_lock { - LIST_ENTRY(file_lock) entry; /* Entry if file list. */ - TAILQ_HEAD(lock_head, pthread) - l_head; /* Head of queue for threads */ - /* waiting on this lock. */ - FILE *fp; /* The target file. */ - pthread_t owner; /* Thread that owns lock. */ - int count; /* Lock count for owner. */ -}; - -/* - * The number of file lock lists into which the file pointer is - * hashed. Ideally, the FILE structure size would have been increased, - * but this causes incompatibility, so separate data structures are - * required. - */ -#define NUM_HEADS 128 - -/* - * This macro casts a file pointer to a long integer and right - * shifts this by the number of bytes in a pointer. The shifted - * value is then remaindered using the maximum number of hash - * entries to produce and index into the array of static lock - * structures. If there is a collision, a linear search of the - * dynamic list of locks linked to each static lock is perfomed. - */ -#define file_idx(_p) ((((u_long) _p) >> sizeof(void *)) % NUM_HEADS) - -/* - * Global array of file locks. The first lock for each hash bucket is - * allocated statically in the hope that there won't be too many - * collisions that require a malloc and an element added to the list. 
- */ -struct static_file_lock { - LIST_HEAD(file_list_head, file_lock) head; - struct file_lock fl; -} flh[NUM_HEADS]; - -/* Set to non-zero when initialisation is complete: */ -static int init_done = 0; - -/* Lock for accesses to the hash table: */ -static spinlock_t hash_lock = _SPINLOCK_INITIALIZER; - -/* - * Find a lock structure for a FILE, return NULL if the file is - * not locked: - */ -static -struct file_lock * -find_lock(int idx, FILE *fp) -{ - struct file_lock *p; - - /* Check if the file is locked using the static structure: */ - if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL) - /* Return a pointer to the static lock: */ - p = &flh[idx].fl; - else { - /* Point to the first dynamic lock: */ - p = LIST_FIRST(&flh[idx].head); - - /* - * Loop through the dynamic locks looking for the - * target file: - */ - while (p != NULL && (p->fp != fp || p->owner == NULL)) - /* Not this file, try the next: */ - p = LIST_NEXT(p, entry); - } - return(p); -} - -/* - * Lock a file, assuming that there is no lock structure currently - * assigned to it. 
- */ -static -struct file_lock * -do_lock(int idx, FILE *fp) -{ - struct pthread *curthread = _get_curthread(); - struct file_lock *p; - - /* Check if the static structure is not being used: */ - if (flh[idx].fl.owner == NULL) { - /* Return a pointer to the static lock: */ - p = &flh[idx].fl; - } - else { - /* Point to the first dynamic lock: */ - p = LIST_FIRST(&flh[idx].head); - - /* - * Loop through the dynamic locks looking for a - * lock structure that is not being used: - */ - while (p != NULL && p->owner != NULL) - /* This one is used, try the next: */ - p = LIST_NEXT(p, entry); - } - - /* - * If an existing lock structure has not been found, - * allocate memory for a new one: - */ - if (p == NULL && (p = (struct file_lock *) - malloc(sizeof(struct file_lock))) != NULL) { - /* Add the new element to the list: */ - LIST_INSERT_HEAD(&flh[idx].head, p, entry); - } - - /* Check if there is a lock structure to acquire: */ - if (p != NULL) { - /* Acquire the lock for the running thread: */ - p->fp = fp; - p->owner = curthread; - p->count = 1; - TAILQ_INIT(&p->l_head); - } - return(p); -} - void -_flockfile_debug(FILE * fp, char *fname, int lineno) +_flockfile_debug(FILE *fp, char *fname, int lineno) { - struct pthread *curthread = _get_curthread(); - int idx = file_idx(fp); - struct file_lock *p; + pthread_t curthread = _pthread_self(); - /* Check if this is a real file: */ - if (fp->_file >= 0) { - /* Lock the hash table: */ - _SPINLOCK(&hash_lock); - - /* Check if the static array has not been initialised: */ - if (!init_done) { - /* Initialise the global array: */ - memset(flh,0,sizeof(flh)); - - /* Flag the initialisation as complete: */ - init_done = 1; - } - - /* Get a pointer to any existing lock for the file: */ - if ((p = find_lock(idx, fp)) == NULL) { - /* - * The file is not locked, so this thread can - * grab the lock: - */ - p = do_lock(idx, fp); - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - - /* - * The file is already locked, so 
check if the - * running thread is the owner: - */ - } else if (p->owner == curthread) { - /* - * The running thread is already the - * owner, so increment the count of - * the number of times it has locked - * the file: - */ - p->count++; - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - } else { - /* Clear the interrupted flag: */ - curthread->interrupted = 0; - - /* - * Prevent being context switched out while - * adding this thread to the file lock queue. - */ - _thread_kern_sig_defer(); - - /* - * The file is locked for another thread. - * Append this thread to the queue of - * threads waiting on the lock. - */ - TAILQ_INSERT_TAIL(&p->l_head,curthread,qe); - curthread->flags |= PTHREAD_FLAGS_IN_FILEQ; - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - - curthread->data.fp = fp; - - /* Wait on the FILE lock: */ - _thread_kern_sched_state(PS_FILE_WAIT, fname, lineno); - - if ((curthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) { - TAILQ_REMOVE(&p->l_head,curthread,qe); - curthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ; - } - - _thread_kern_sig_undefer(); - - if (curthread->interrupted != 0 && - curthread->continuation != NULL) - curthread->continuation((void *)curthread); - } - } + curthread->lineno = lineno; + curthread->fname = fname; + _flockfile(fp); } - -void -_flockfile(FILE * fp) -{ - _flockfile_debug(fp, __FILE__, __LINE__); -} - -int -_ftrylockfile(FILE * fp) -{ - struct pthread *curthread = _get_curthread(); - int ret = -1; - int idx = file_idx(fp); - struct file_lock *p; - - /* Check if this is a real file: */ - if (fp->_file >= 0) { - /* Lock the hash table: */ - _SPINLOCK(&hash_lock); - - /* Get a pointer to any existing lock for the file: */ - if ((p = find_lock(idx, fp)) == NULL) { - /* - * The file is not locked, so this thread can - * grab the lock: - */ - p = do_lock(idx, fp); - - /* - * The file is already locked, so check if the - * running thread is the owner: - */ - } else if (p->owner == curthread) { - /* - * The running 
thread is already the - * owner, so increment the count of - * the number of times it has locked - * the file: - */ - p->count++; - } else { - /* - * The file is locked for another thread, - * so this try fails. - */ - p = NULL; - } - - /* Check if the lock was obtained: */ - if (p != NULL) - /* Return success: */ - ret = 0; - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - - } - return (ret); -} - -void -_funlockfile(FILE * fp) -{ - struct pthread *curthread = _get_curthread(); - int idx = file_idx(fp); - struct file_lock *p; - - /* Check if this is a real file: */ - if (fp->_file >= 0) { - /* - * Defer signals to protect the scheduling queues from - * access by the signal handler: - */ - _thread_kern_sig_defer(); - - /* Lock the hash table: */ - _SPINLOCK(&hash_lock); - - /* - * Get a pointer to the lock for the file and check that - * the running thread is the one with the lock: - */ - if ((p = find_lock(idx, fp)) != NULL && - p->owner == curthread) { - /* - * Check if this thread has locked the FILE - * more than once: - */ - if (p->count > 1) - /* - * Decrement the count of the number of - * times the running thread has locked this - * file: - */ - p->count--; - else { - /* - * The running thread will release the - * lock now: - */ - p->count = 0; - - /* Get the new owner of the lock: */ - while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) { - /* Pop the thread off the queue: */ - TAILQ_REMOVE(&p->l_head,p->owner,qe); - p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ; - - if (p->owner->interrupted == 0) { - /* - * This is the first lock for - * the new owner: - */ - p->count = 1; - - /* Allow the new owner to run: */ - PTHREAD_NEW_STATE(p->owner,PS_RUNNING); - - /* End the loop when we find a - * thread that hasn't been - * cancelled or interrupted; - */ - break; - } - } - } - } - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - _thread_kern_sig_undefer(); - } -} 
- -void -_funlock_owned(pthread_t pthread) -{ - int idx; - struct file_lock *p, *next_p; - - /* - * Defer signals to protect the scheduling queues from - * access by the signal handler: - */ - _thread_kern_sig_defer(); - - /* Lock the hash table: */ - _SPINLOCK(&hash_lock); - - for (idx = 0; idx < NUM_HEADS; idx++) { - /* Check the static file lock first: */ - p = &flh[idx].fl; - next_p = LIST_FIRST(&flh[idx].head); - - while (p != NULL) { - if (p->owner == pthread) { - /* - * The running thread will release the - * lock now: - */ - p->count = 0; - - /* Get the new owner of the lock: */ - while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) { - /* Pop the thread off the queue: */ - TAILQ_REMOVE(&p->l_head,p->owner,qe); - p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ; - - if (p->owner->interrupted == 0) { - /* - * This is the first lock for - * the new owner: - */ - p->count = 1; - - /* Allow the new owner to run: */ - PTHREAD_NEW_STATE(p->owner,PS_RUNNING); - - /* End the loop when we find a - * thread that hasn't been - * cancelled or interrupted; - */ - break; - } - } - } - p = next_p; - if (next_p != NULL) - next_p = LIST_NEXT(next_p, entry); - } - } - - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - _thread_kern_sig_undefer(); -} - -void -_flockfile_backout(pthread_t pthread) -{ - int idx = file_idx(pthread->data.fp); - struct file_lock *p; - - /* - * Defer signals to protect the scheduling queues from - * access by the signal handler: - */ - _thread_kern_sig_defer(); - - /* - * Get a pointer to the lock for the file and check that - * the running thread is the one with the lock: - */ - if (((pthread->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) && - ((p = find_lock(idx, pthread->data.fp)) != NULL)) { - /* Lock the hash table: */ - _SPINLOCK(&hash_lock); - - /* Remove the thread from the queue: */ - TAILQ_REMOVE(&p->l_head, pthread, qe); - pthread->flags &= ~PTHREAD_FLAGS_IN_FILEQ; 
- - /* Unlock the hash table: */ - _SPINUNLOCK(&hash_lock); - } - - /* - * Undefer and handle pending signals, yielding if necessary: - */ - _thread_kern_sig_undefer(); -} - Index: head/lib/libc_r/uthread/uthread_sig.c =================================================================== --- head/lib/libc_r/uthread/uthread_sig.c (revision 72373) +++ head/lib/libc_r/uthread/uthread_sig.c (revision 72374) @@ -1,1121 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); pthread_t pthread, pthread_h; void *stackp; int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); if (_thread_kern_in_sched != 0) in_sched = 1; else { stackp = (void *)GET_STACK_UC(ucp); if ((stackp >= _thread_kern_sched_stack) && (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) in_sched = 1; } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. */ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(curthread, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. 
*/ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. 
* Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. */ thread_sig_savecontext(curthread, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ if ((pthread == curthread) || ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority))) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); /* XXX - Save FP registers too? */ FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). 
*/ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. 
*/ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. 
It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. 
*/ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { struct pthread *curthread = _get_curthread(); int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_JOIN: /* Only set the interrupted flag for PS_JOIN: */ pthread->interrupted = 1; /* FALLTHROUGH */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. 
*/ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != curthread) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. 
*/ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && 
sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == curthread) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; - case PS_FILE_WAIT: - _flockfile_backout(thread); - psf->saved_state.psd_state = PS_RUNNING; - break; - case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_JOIN: _join_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { DBG_MSG("_thread_sig_wrapper: Calling signal handler for " "thread 0x%p\n", thread); /* * Dispatch the signal via the custom signal * handler: */ if (psf->sig_has_args == 0) (*(sigfunc))(psf->signo, NULL, NULL); else if ((_thread_sigact[psf->signo - 1].sa_flags & SA_SIGINFO) != 0) (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); else (*(sigfunc))(psf->signo, (siginfo_t *)psf->siginfo.si_code, &psf->uc); } /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(pthread_t thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: stackp 
= GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->ctxtype = CTX_JB_NOSIG; thread->longjmp_val = 1; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { thread->ctxtype = psf->ctxtype; memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = 
psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->longjmp_val = psf->saved_state.psd_longjmp_val; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { psf->ctxtype = thread->ctxtype; memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_longjmp_val = thread->longjmp_val; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } Index: head/lib/libkse/thread/thr_exit.c =================================================================== --- head/lib/libkse/thread/thr_exit.c (revision 72373) +++ head/lib/libkse/thread/thr_exit.c (revision 72374) @@ -1,238 +1,231 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) #pragma weak pthread_exit=_pthread_exit void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ - strcpy(s, "Fatal error '"); - strcat(s, string); - strcat(s, "' at line ? "); - strcat(s, "in file "); - strcat(s, fname); - strcat(s, " (errno = ?"); - strcat(s, ")\n"); + snprintf(s, sizeof(s), + "Fatal error '%s' at line %d in file %s (errno = %d)\n", + string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. 
*/ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); - - /* Unlock all owned file locks: */ - _funlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. 
*/ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(curthread->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&curthread->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* * Set the return value for the woken thread: */ if ((curthread->attr.flags & PTHREAD_DETACHED) != 0) pthread->error = ESRCH; else { pthread->ret = curthread->ret; pthread->error = 0; } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. */ PANIC("Dead thread has resumed"); } Index: head/lib/libkse/thread/thr_private.h =================================================================== --- head/lib/libkse/thread/thr_private.h (revision 72373) +++ head/lib/libkse/thread/thr_private.h (revision 72374) @@ -1,1401 +1,1400 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. 
*/ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (unsigned long)(ra) + 8UL; \ (jb)[0]._jb[R_RA + 4] = 0; \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) 
do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 /* * Additional state for a thread suspended with pthread_suspend_np(). */ enum pthread_susp { SUSP_NO, /* Not suspended. */ SUSP_YES, /* Suspended. */ SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. 
*/ SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ SUSP_COND_WAIT /* Suspended, still in a condition queue. */ }; /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of red zone at the end of each stack. In actuality, this "red zone" is * merely an unmapped region, except in the case of the initial stack. Since * mmap() makes it possible to specify the maximum growth of a MAP_STACK region, * an unmapped gap between thread stacks achieves the same effect as explicitly * mapped red zones. */ #define PTHREAD_STACK_GUARD PAGE_SIZE /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 /* Size of the scheduler stack: */ #define SCHED_STACK_SIZE PAGE_SIZE /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_key { spinlock_t lock; volatile int allocated; volatile int count; void (*destructor) (); }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. 
*/ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); struct pthread_signal_frame; struct pthread_state_data { struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; enum pthread_state psd_state; int psd_flags; int psd_interrupted; int psd_longjmp_val; int psd_sigmask_seqno; int psd_signo; int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; /* * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(), * but they may also be sigjmp_buf and ucontext_t. When a thread is * interrupted by a signal, it's context is saved as a ucontext_t. An * application is also free to use [_]longjmp()/[_]siglongjmp() to jump * between contexts within the same thread. Future support will also * include setcontext()/getcontext(). 
* * Define an enumerated type that can identify the 4 different context * types. */ typedef enum { CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */ CTX_JB, /* context is jmp_buf (with saved sigset) */ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */ CTX_UC /* context is ucontext_t (with saved sigset) */ } thread_context_t; /* * There are 2 basic contexts that a frame may contain at any * one time: * * o ctx - The context that the thread should return to after normal * completion of the signal handler. * o sig_jb - The context just before the signal handler is invoked. * Attempts at abnormal returns from user supplied signal handlers * will return back to the signal context to perform any necessary * cleanup. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; /* * Threads return context; ctxtype identifies the type of context. * For signal frame 0, these point to the context storage area * within the pthread structure. When handling signals (frame > 0), * these point to a context storage area that is allocated off the * threads stack. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ ucontext_t uc; siginfo_t siginfo; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. 
*/ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* * Threads return context; ctxtype identifies the type of context. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; /* * Used for tracking delivery of signal handlers. */ struct pthread_signal_frame *curframe; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; enum pthread_susp suspended; thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* Join queue head and link for waiting threads: */ TAILQ_HEAD(join_head, pthread) join_queue; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). 
It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for another thread to terminate * (the join queue above) * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex, condition variable, or join queue when it is * not being called from a signal handler. If a thread is a * member of one of these queues when a signal handler is invoked, * it must remain in the queue. For this reason, the links for * these queues must not be (re)used for other queues. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. */ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. 
*/ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ #define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */ #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. 
*/ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; const void **specific_data; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* Spare thread stack. */ struct stack { SLIST_ENTRY(stack) qe; /* Queue entry for this stack. */ }; /* * Global variables for the uthread kernel. */ /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). */ SCLASS struct pthread * volatile _thread_single #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. */ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. 
*/ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. */ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. 
*/ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; SCLASS sigset_t _process_sigmask #ifdef GLOBAL_PTHREAD_PRIVATE = { {0, 0, 0, 0} } #endif ; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. */ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* The signal stack. */ SCLASS struct sigaltstack _thread_sigstack; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Spare stack queue. Stacks of default size are cached in order to reduce * thread creation time. Spare stacks are used in LIFO order to increase cache * locality. 
*/ SCLASS SLIST_HEAD(, stack) _stackq; /* * Base address of next unallocated default-size {stack, red zone}. Stacks are * allocated contiguously, starting below the bottom of the main stack. When a * new stack is created, a red zone is created (actually, the red zone is simply * left unmapped) below the bottom of the stack, such that the stack will not be * able to grow all the way to the top of the next stack. This isn't * fool-proof. It is possible for a stack to grow by a large amount, such that * it grows into the next stack, and as long as the memory within the red zone * is never accessed, nothing will prevent one thread stack from trouncing all * over the next. */ SCLASS void * _next_stack #ifdef GLOBAL_PTHREAD_PRIVATE /* main stack top - main stack size - stack size - (red zone + main stack red zone) */ = (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD) #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); -void _flockfile_backout(pthread_t); -void _funlock_owned(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); +pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); 
void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct 
sigaltstack *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); int __sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #inclde */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, int *); int __sys_bind(int, const struct sockaddr *, int); int __sys_connect(int, const struct sockaddr *, int); int __sys_getpeername(int, struct sockaddr *, int *); int __sys_getsockname(int, struct sockaddr *, int *); int __sys_getsockopt(int, int, int, void *, int *); int __sys_listen(int, int); int __sys_setsockopt(int, int, int, const void *, int); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t __sys_recvmsg(int, struct msghdr *, int); ssize_t __sys_send(int, const void *, size_t, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fchown(int, uid_t, gid_t); int __sys_fork(void); int __sys_fsync(int); int __sys_pipe(int *); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); long __sys_fpathconf(int, int); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef 
_SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ Index: head/lib/libkse/thread/thr_sig.c =================================================================== --- head/lib/libkse/thread/thr_sig.c (revision 72373) +++ head/lib/libkse/thread/thr_sig.c (revision 72374) @@ -1,1121 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); pthread_t pthread, pthread_h; void *stackp; int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); if (_thread_kern_in_sched != 0) in_sched = 1; else { stackp = (void *)GET_STACK_UC(ucp); if ((stackp >= _thread_kern_sched_stack) && (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) in_sched = 1; } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. */ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(curthread, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. 
*/ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. 
* Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. */ thread_sig_savecontext(curthread, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ if ((pthread == curthread) || ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority))) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); /* XXX - Save FP registers too? */ FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). 
*/ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. 
*/ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. 
It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. 
*/ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { struct pthread *curthread = _get_curthread(); int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_JOIN: /* Only set the interrupted flag for PS_JOIN: */ pthread->interrupted = 1; /* FALLTHROUGH */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. 
*/ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != curthread) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. 
*/ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && 
sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == curthread) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; - case PS_FILE_WAIT: - _flockfile_backout(thread); - psf->saved_state.psd_state = PS_RUNNING; - break; - case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_JOIN: _join_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { DBG_MSG("_thread_sig_wrapper: Calling signal handler for " "thread 0x%p\n", thread); /* * Dispatch the signal via the custom signal * handler: */ if (psf->sig_has_args == 0) (*(sigfunc))(psf->signo, NULL, NULL); else if ((_thread_sigact[psf->signo - 1].sa_flags & SA_SIGINFO) != 0) (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); else (*(sigfunc))(psf->signo, (siginfo_t *)psf->siginfo.si_code, &psf->uc); } /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(pthread_t thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: stackp 
= GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->ctxtype = CTX_JB_NOSIG; thread->longjmp_val = 1; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { thread->ctxtype = psf->ctxtype; memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = 
psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->longjmp_val = psf->saved_state.psd_longjmp_val; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { psf->ctxtype = thread->ctxtype; memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_longjmp_val = thread->longjmp_val; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } Index: head/lib/libpthread/thread/thr_exit.c =================================================================== --- head/lib/libpthread/thread/thr_exit.c (revision 72373) +++ head/lib/libpthread/thread/thr_exit.c (revision 72374) @@ -1,238 +1,231 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) #pragma weak pthread_exit=_pthread_exit void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ - strcpy(s, "Fatal error '"); - strcat(s, string); - strcat(s, "' at line ? "); - strcat(s, "in file "); - strcat(s, fname); - strcat(s, " (errno = ?"); - strcat(s, ")\n"); + snprintf(s, sizeof(s), + "Fatal error '%s' at line %d in file %s (errno = %d)\n", + string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. 
*/ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); - - /* Unlock all owned file locks: */ - _funlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. 
*/ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(curthread->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&curthread->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* * Set the return value for the woken thread: */ if ((curthread->attr.flags & PTHREAD_DETACHED) != 0) pthread->error = ESRCH; else { pthread->ret = curthread->ret; pthread->error = 0; } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. */ PANIC("Dead thread has resumed"); } Index: head/lib/libpthread/thread/thr_private.h =================================================================== --- head/lib/libpthread/thread/thr_private.h (revision 72373) +++ head/lib/libpthread/thread/thr_private.h (revision 72374) @@ -1,1401 +1,1400 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. 
*/ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (unsigned long)(ra) + 8UL; \ (jb)[0]._jb[R_RA + 4] = 0; \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) 
do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/
/* One bucket of the ready queue: all runnable threads at one priority. */
typedef struct pq_list {
	TAILQ_HEAD(, pthread)	pl_head; /* list of threads at this priority */
	TAILQ_ENTRY(pq_list)	pl_link; /* link for queue of priority lists */
	int			pl_prio; /* the priority of this list */
	int			pl_queued; /* is this in the priority queue */
} pq_list_t;

/* The priority queue proper: an ordered queue of per-priority lists. */
typedef struct pq_queue {
	TAILQ_HEAD(, pq_list)	pq_queue; /* queue of priority lists */
	pq_list_t		*pq_lists; /* array of all priority lists */
	int			pq_size; /* number of priority lists */
} pq_queue_t;

/*
 * TailQ initialization values.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

/*
 * Mutex definitions.
 */
union pthread_mutex_data {
	void	*m_ptr;
	int	m_count;
};

struct pthread_mutex {
	enum pthread_mutextype		m_type;
	int				m_protocol;
	/* Threads blocked waiting to acquire this mutex. */
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	union pthread_mutex_data	m_data;
	long				m_flags;
	int				m_refcount;

	/*
	 * Used for priority inheritence and protection.
	 *
	 * m_prio       - For priority inheritence, the highest active
	 *                priority (threads locking the mutex inherit
	 *                this priority).  For priority protection, the
	 *                ceiling priority of this mutex.
	 * m_saved_prio - mutex owners inherited priority before
	 *                taking the mutex, restored when the owner
	 *                unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;

	/*
	 * Lock for accesses to this structure.
	 */
	spinlock_t			lock;
};

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02
#define MUTEX_FLAGS_BUSY	0x04

/*
 * Static mutex initialization values.
*/
/* Field order must track struct pthread_mutex above. */
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
	NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
	_SPINLOCK_INITIALIZER }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	long			m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX
};

struct pthread_cond {
	enum pthread_cond_type		c_type;
	/* Threads blocked on this condition variable. */
	TAILQ_HEAD(cond_head, pthread)	c_queue;
	pthread_mutex_t			c_mutex;
	void				*c_data;
	long				c_flags;
	int				c_seqno;

	/*
	 * Lock for accesses to this structure.
	 */
	spinlock_t			lock;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Static cond initialization values.
 */
#define PTHREAD_COND_STATIC_INITIALIZER \
	{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
	0, 0, _SPINLOCK_INITIALIZER }

/*
 * Semaphore definitions.
 */
struct sem {
#define	SEM_MAGIC	((u_int32_t) 0x09fa4012)
	u_int32_t	magic;		/* identifies a valid struct sem */
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;		/* signalled when count goes > 0 */
	u_int32_t	count;
	u_int32_t	nwaiters;
};

/*
 * Cleanup definitions.
 */
/* Singly-linked stack of pthread_cleanup_push() handlers. */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine) ();
	void			*routine_arg;
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr) ();
	void	*stackaddr_attr;
	size_t	stacksize_attr;
};

/*
 * Thread creation state attributes.
 */
#define PTHREAD_CREATE_RUNNING			0
#define PTHREAD_CREATE_SUSPENDED		1

/*
 * Additional state for a thread suspended with pthread_suspend_np().
 */
enum pthread_susp {
	SUSP_NO,	/* Not suspended. */
	SUSP_YES,	/* Suspended. */
	SUSP_NOWAIT,	/* Suspended, was in a mutex or condition queue. */
	SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */
	SUSP_COND_WAIT	/* Suspended, still in a condition queue. */
};

/*
 * Miscellaneous definitions.
 */
#define PTHREAD_STACK_DEFAULT			65536
/*
 * Size of red zone at the end of each stack.  In actuality, this "red zone" is
 * merely an unmapped region, except in the case of the initial stack.  Since
 * mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
 * an unmapped gap between thread stacks achieves the same effect as explicitly
 * mapped red zones.
 */
#define PTHREAD_STACK_GUARD			PAGE_SIZE

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define PTHREAD_STACK_INITIAL			0x100000

/* Size of the scheduler stack: */
#define SCHED_STACK_SIZE			PAGE_SIZE

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
*/
#define PTHREAD_DEFAULT_PRIORITY		15
#define PTHREAD_MIN_PRIORITY			0
#define PTHREAD_MAX_PRIORITY			31	/* 0x1F */
#define PTHREAD_SIGNAL_PRIORITY			32	/* 0x20 */
#define PTHREAD_RT_PRIORITY			64	/* 0x40 */
#define PTHREAD_FIRST_PRIORITY			PTHREAD_MIN_PRIORITY
#define PTHREAD_LAST_PRIORITY	\
	(PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
/* Strip the SIGNAL/RT class bits, leaving the 0-31 user priority. */
#define PTHREAD_BASE_PRIORITY(prio)	((prio) & PTHREAD_MAX_PRIORITY)

/*
 * Clock resolution in microseconds.
 */
#define CLOCK_RES_USEC				10000

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000

/*
 * Define a thread-safe macro to get the current time of day
 * which is updated at regular intervals by the scheduling signal
 * handler.  The loop re-reads until both halves are consistent,
 * in case the handler updates _sched_tod mid-copy.
 */
#define	GET_CURRENT_TOD(tv)				\
	do {						\
		tv.tv_sec = _sched_tod.tv_sec;		\
		tv.tv_usec = _sched_tod.tv_usec;	\
	} while (tv.tv_sec != _sched_tod.tv_sec)

/* Per-key bookkeeping for pthread_key_create() thread-specific data. */
struct pthread_key {
	spinlock_t	lock;
	volatile int	allocated;
	volatile int	count;
	void            (*destructor) ();
};

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_SIGTHREAD,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_FDLR_WAIT,
	PS_FDLW_WAIT,
	PS_FDR_WAIT,
	PS_FDW_WAIT,
	PS_FILE_WAIT,
	PS_POLL_WAIT,
	PS_SELECT_WAIT,
	PS_SLEEP_WAIT,
	PS_WAIT_WAIT,
	PS_SIGSUSPEND,
	PS_SIGWAIT,
	PS_SPINBLOCK,
	PS_JOIN,
	PS_SUSPENDED,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX
};

/*
 * File descriptor locking definitions.
 */
#define FD_READ		0x1
#define FD_WRITE	0x2
#define FD_RDWR		(FD_READ | FD_WRITE)

/*
 * File descriptor table structure.
 */
struct fd_table_entry {
	/*
	 * Lock for accesses to this file descriptor table
	 * entry.  This is passed to _spinlock() to provide atomic
	 * access to this structure.  It does *not* represent the
	 * state of the lock on the file descriptor.
	 */
	spinlock_t		lock;
	TAILQ_HEAD(, pthread)	r_queue;	/* Read queue. */
	TAILQ_HEAD(, pthread)	w_queue;	/* Write queue. */
	struct pthread		*r_owner;	/* Ptr to thread owning read lock. */
	struct pthread		*w_owner;	/* Ptr to thread owning write lock. */
	char			*r_fname;	/* Ptr to read lock source file name */
	int			r_lineno;	/* Read lock source line number. */
	char			*w_fname;	/* Ptr to write lock source file name */
	int			w_lineno;	/* Write lock source line number. */
	int			r_lockcount;	/* Count for FILE read locks. */
	int			w_lockcount;	/* Count for FILE write locks. */
	int			flags;		/* Flags used in open. */
};

struct pthread_poll_data {
	int	nfds;
	struct pollfd *fds;
};

/* What a blocked thread is waiting on; interpretation depends on state. */
union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	const sigset_t	*sigwait;	/* Waiting on a signal in sigwait */
	struct {
		short	fd;		/* Used when thread waiting on fd */
		short	branch;		/* Line number, for debugging. */
		char	*fname;		/* Source file name for debugging.*/
	} fd;
	FILE		*fp;
	struct pthread_poll_data *poll_data;
	spinlock_t	*spinlock;
	struct pthread	*thread;
};

/*
 * Define a continuation routine that can be used to perform a
 * transfer of control:
 */
typedef void	(*thread_continuation_t) (void *);

struct pthread_signal_frame;

/* Snapshot of the per-thread state saved/restored around a signal frame. */
struct pthread_state_data {
	struct pthread_signal_frame *psd_curframe;
	sigset_t		psd_sigmask;
	struct timespec		psd_wakeup_time;
	union pthread_wait_data psd_wait_data;
	enum pthread_state	psd_state;
	int			psd_flags;
	int			psd_interrupted;
	int			psd_longjmp_val;
	int			psd_sigmask_seqno;
	int			psd_signo;
	int			psd_sig_defer_count;
	/* XXX - What about thread->timeout and/or thread->error? */
};

/*
 * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
 * but they may also be sigjmp_buf and ucontext_t.  When a thread is
 * interrupted by a signal, it's context is saved as a ucontext_t.  An
 * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
 * between contexts within the same thread.  Future support will also
 * include setcontext()/getcontext().
 *
 * Define an enumerated type that can identify the 4 different context
 * types.
 */
typedef enum {
	CTX_JB_NOSIG,		/* context is jmp_buf without saved sigset */
	CTX_JB,			/* context is jmp_buf (with saved sigset) */
	CTX_SJB,		/* context is sigjmp_buf (with saved sigset) */
	CTX_UC			/* context is ucontext_t (with saved sigset) */
} thread_context_t;

/*
 * There are 2 basic contexts that a frame may contain at any
 * one time:
 *
 * o ctx - The context that the thread should return to after normal
 *   completion of the signal handler.
 * o sig_jb - The context just before the signal handler is invoked.
 *   Attempts at abnormal returns from user supplied signal handlers
 *   will return back to the signal context to perform any necessary
 *   cleanup.
 */
struct pthread_signal_frame {
	/*
	 * This stores the threads state before the signal.
	 */
	struct pthread_state_data saved_state;

	/*
	 * Threads return context; ctxtype identifies the type of context.
	 * For signal frame 0, these point to the context storage area
	 * within the pthread structure.  When handling signals (frame > 0),
	 * these point to a context storage area that is allocated off the
	 * threads stack.
	 */
	union {
		jmp_buf		jb;
		sigjmp_buf	sigjb;
		ucontext_t	uc;
	} ctx;
	thread_context_t	ctxtype;
	int			longjmp_val;
	int			signo;		/* signal, arg 1 to sighandler */
	int			sig_has_args;	/* use signal args if true */
	ucontext_t		uc;
	siginfo_t		siginfo;
};

/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	PTHREAD_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/*
	 * Lock for accesses to this thread structure.
	 */
	spinlock_t		lock;

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread)	tle;

	/* Queue entry for list of dead threads: */
	TAILQ_ENTRY(pthread)	dle;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	void			*stack;
	struct pthread_attr	attr;

	/*
	 * Threads return context; ctxtype identifies the type of context.
	 */
	union {
		jmp_buf		jb;
		sigjmp_buf	sigjb;
		ucontext_t	uc;
	} ctx;
	thread_context_t	ctxtype;
	int			longjmp_val;

	/*
	 * Used for tracking delivery of signal handlers.
	 */
	struct pthread_signal_frame	*curframe;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h
	 */
#define PTHREAD_AT_CANCEL_POINT		0x0004
#define PTHREAD_CANCELLING		0x0008
#define PTHREAD_CANCEL_NEEDED		0x0010
	int	cancelflags;

	enum pthread_susp	suspended;

	thread_continuation_t	continuation;

	/*
	 * Current signal mask and pending signals.
	 */
	sigset_t	sigmask;
	sigset_t	sigpend;
	int		sigmask_seqno;
	int		check_pending;

	/* Thread state: */
	enum pthread_state	state;

	/* Scheduling clock when this thread was last made active. */
	long	last_active;

	/* Scheduling clock when this thread was last made inactive. */
	long	last_inactive;

	/*
	 * Number of microseconds accumulated by this thread when
	 * time slicing is active.
	 */
	long	slice_usec;

	/*
	 * Time to wake up thread.  This is used for sleeping threads and
	 * for any operation which may time out (such as select).
	 */
	struct timespec	wakeup_time;

	/* TRUE if operation has timed out. */
	int	timeout;

	/*
	 * Error variable used instead of errno.  The function __error()
	 * returns a pointer to this.
	 */
	int	error;

	/* Join queue head and link for waiting threads: */
	TAILQ_HEAD(join_head, pthread)	join_queue;

	/*
	 * The current thread can belong to only one scheduling queue at
	 * a time (ready or waiting queue).  It can also belong to:
	 *
	 * o A queue of threads waiting for a mutex
	 * o A queue of threads waiting for a condition variable
	 * o A queue of threads waiting for another thread to terminate
	 *   (the join queue above)
	 * o A queue of threads waiting for a file descriptor lock
	 * o A queue of threads needing work done by the kernel thread
	 *   (waiting for a spinlock or file I/O)
	 *
	 * It is possible for a thread to belong to more than one of the
	 * above queues if it is handling a signal.  A thread may only
	 * enter a mutex, condition variable, or join queue when it is
	 * not being called from a signal handler.  If a thread is a
	 * member of one of these queues when a signal handler is invoked,
	 * it must remain in the queue.  For this reason, the links for
	 * these queues must not be (re)used for other queues.
	 *
	 * Use pqe for the scheduling queue link (both ready and waiting),
	 * sqe for synchronization (mutex, condition variable, and join)
	 * queue links, and qe for all other links.
	 */
	TAILQ_ENTRY(pthread)	pqe;	/* priority queue link */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */
	TAILQ_ENTRY(pthread)	qe;	/* all other queues link */

	/* Wait data. */
	union pthread_wait_data data;

	/*
	 * Allocated for converting select into poll.
	 */
	struct pthread_poll_data poll_data;

	/*
	 * Set to TRUE if a blocking operation was
	 * interrupted by a signal:
	 */
	int		interrupted;

	/* Signal number when in state PS_SIGWAIT: */
	int		signo;

	/*
	 * Set to non-zero when this thread has deferred signals.
	 * We allow for recursive deferral.
	 */
	int		sig_defer_count;

	/*
	 * Set to TRUE if this thread should yield after undeferring
	 * signals.
	 */
	int		yield_on_sig_undefer;

	/* Miscellaneous flags; only set with signals deferred. */
	int		flags;
#define PTHREAD_FLAGS_PRIVATE	0x0001
#define PTHREAD_EXITING		0x0002
#define PTHREAD_FLAGS_IN_WAITQ	0x0004	/* in waiting queue using pqe link */
#define PTHREAD_FLAGS_IN_PRIOQ	0x0008	/* in priority queue using pqe link */
#define PTHREAD_FLAGS_IN_WORKQ	0x0010	/* in work queue using qe link */
#define PTHREAD_FLAGS_IN_FILEQ	0x0020	/* in file lock queue using qe link */
#define PTHREAD_FLAGS_IN_FDQ	0x0040	/* in fd lock queue using qe link */
#define PTHREAD_FLAGS_IN_CONDQ	0x0080	/* in condition queue using sqe link*/
#define PTHREAD_FLAGS_IN_MUTEXQ	0x0100	/* in mutex queue using sqe link */
#define PTHREAD_FLAGS_IN_JOINQ	0x0200	/* in join queue using sqe link */
#define PTHREAD_FLAGS_TRACE	0x0400	/* for debugging purposes */
#define PTHREAD_FLAGS_IN_SYNCQ	\
    (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ)

	/*
	 * Base priority is the user setable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char		base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritence or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char		inherited_priority;

	/*
	 * Active priority is always the maximum of the threads base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char		active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int		priority_mutex_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	void		*ret;
	const void	**specific_data;
	int		specific_data_count;

	/* Cleanup handlers Link List */
	struct pthread_cleanup *cleanup;
	char			*fname;	/* Ptr to source file name */
	int			lineno;	/* Source line number. */
};

/* Spare thread stack. */
struct stack {
	SLIST_ENTRY(stack)	qe; /* Queue entry for this stack. */
};

/*
 * Global variables for the uthread kernel.
 */

/* Kernel thread structure used when there are no running threads: */
SCLASS struct pthread   _thread_kern_thread;

/* Ptr to the thread structure for the running thread: */
SCLASS struct pthread   * volatile _thread_run
#ifdef GLOBAL_PTHREAD_PRIVATE
= &_thread_kern_thread;
#else
;
#endif

/* Ptr to the thread structure for the last user thread to run: */
SCLASS struct pthread   * volatile _last_user_thread
#ifdef GLOBAL_PTHREAD_PRIVATE
= &_thread_kern_thread;
#else
;
#endif

/*
 * Ptr to the thread running in single-threaded mode or NULL if
 * running multi-threaded (default POSIX behaviour).
 */
SCLASS struct pthread   * volatile _thread_single
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif

/*
 * Array of kernel pipe file descriptors that are used to ensure that
 * no signals are missed in calls to _select.
 */
SCLASS int		_thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
	-1,
	-1
};
#else
;
#endif
SCLASS int		volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif
SCLASS int		_thread_kern_in_sched
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

SCLASS int		_sig_in_handler
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Time of day at last scheduling timer signal: */
SCLASS struct timeval volatile	_sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif

/*
 * Current scheduling timer ticks; used as resource usage.
 */
SCLASS unsigned int volatile	_sched_ticks
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif

/* Initial thread: */
SCLASS struct pthread *_thread_initial
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Default thread attributes: */
SCLASS struct pthread_attr pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
	PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
#else
;
#endif

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr pthread_condattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { COND_TYPE_FAST, 0 };
#else
;
#endif

/*
 * Standard I/O file descriptors need special flag treatment since
 * setting one to non-blocking does all on *BSD. Sigh. This array
 * is used to store the initial flag settings.
 */
SCLASS int	_pthread_stdio_flags[3];

/* File table information: */
SCLASS struct fd_table_entry **_thread_fd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Table for polling file descriptors: */
SCLASS struct pollfd *_thread_pfd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Number of fd_table_entry slots per 4K allocation chunk. */
SCLASS const int dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
#else
;
#endif
SCLASS int    _thread_dtablesize	/* Descriptor table size. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

SCLASS int    _clock_res_usec		/* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= CLOCK_RES_USEC;
#else
;
#endif

/* Garbage collector mutex and condition variable. */
SCLASS	pthread_mutex_t _gc_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;
SCLASS	pthread_cond_t  _gc_cond
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/*
 * Array of signal actions for this process.
 */
SCLASS struct  sigaction _thread_sigact[NSIG];

/*
 * Array of counts of dummy handlers for SIG_DFL signals.  This is used to
 * assure that there is always a dummy signal handler installed while there is a
 * thread sigwait()ing on the corresponding signal.
 */
SCLASS int	_thread_dfl_count[NSIG];

/*
 * Pending signals and mask for this process:
 */
SCLASS sigset_t	_process_sigpending;
SCLASS sigset_t	_process_sigmask
#ifdef GLOBAL_PTHREAD_PRIVATE
= { {0, 0, 0, 0} }
#endif
;

/*
 * Scheduling queues:
 */
SCLASS pq_queue_t		_readyq;
SCLASS TAILQ_HEAD(, pthread)	_waitingq;

/*
 * Work queue:
 */
SCLASS TAILQ_HEAD(, pthread)	_workq;

/* Tracks the number of threads blocked while waiting for a spinlock. */
SCLASS	volatile int	_spinblock_count
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Used to maintain pending and active signals: */
struct sigstatus {
	int		pending;	/* Is this a pending signal? */
	int		blocked;	/*
					 * A handler is currently active for
					 * this signal; ignore subsequent
					 * signals until the handler is done.
					 */
	int		signo;		/* arg 1 to signal handler */
	siginfo_t	siginfo;	/* arg 2 to signal handler */
	ucontext_t	uc;		/* arg 3 to signal handler */
};

SCLASS struct sigstatus	_thread_sigq[NSIG];

/* Indicates that the signal queue needs to be checked. */
SCLASS	volatile int	_sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* The signal stack. */
SCLASS struct sigaltstack _thread_sigstack;

/* Thread switch hook. */
SCLASS pthread_switch_routine_t _sched_switch_hook
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/*
 * Spare stack queue.  Stacks of default size are cached in order to reduce
 * thread creation time.  Spare stacks are used in LIFO order to increase cache
 * locality.
 */
SCLASS SLIST_HEAD(, stack)	_stackq;

/*
 * Base address of next unallocated default-size {stack, red zone}.  Stacks are
 * allocated contiguously, starting below the bottom of the main stack.  When a
 * new stack is created, a red zone is created (actually, the red zone is simply
 * left unmapped) below the bottom of the stack, such that the stack will not be
 * able to grow all the way to the top of the next stack.  This isn't
 * fool-proof.  It is possible for a stack to grow by a large amount, such that
 * it grows into the next stack, and as long as the memory within the red zone
 * is never accessed, nothing will prevent one thread stack from trouncing all
 * over the next.
 */
SCLASS void *	_next_stack
#ifdef GLOBAL_PTHREAD_PRIVATE
/* main stack top   - main stack size       - stack size            - (red zone + main stack red zone) */
= (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD)
#endif
;

/*
 * Declare the kernel scheduler jump buffer and stack:
 */
SCLASS jmp_buf	_thread_kern_sched_jb;

SCLASS void *	_thread_kern_sched_stack
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/* Used for _PTHREADS_INVARIANTS checking. */
SCLASS int	_thread_kern_new_state
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Undefine the storage class specifier: */
#undef  SCLASS

/* Debug variants record the caller's file/line with each fd lock. */
#ifdef	_LOCK_DEBUG
#define	_FD_LOCK(_fd,_type,_ts)		_thread_fd_lock_debug(_fd, _type, \
						_ts, __FILE__, __LINE__)
#define _FD_UNLOCK(_fd,_type)		_thread_fd_unlock_debug(_fd, _type, \
						__FILE__, __LINE__)
#else
#define	_FD_LOCK(_fd,_type,_ts)		_thread_fd_lock(_fd, _type, _ts)
#define _FD_UNLOCK(_fd,_type)		_thread_fd_unlock(_fd, _type)
#endif

/*
 * Function prototype definitions.
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); -void _flockfile_backout(pthread_t); -void _funlock_owned(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); +pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); 
void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct 
sigaltstack *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); int __sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #inclde */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, int *); int __sys_bind(int, const struct sockaddr *, int); int __sys_connect(int, const struct sockaddr *, int); int __sys_getpeername(int, struct sockaddr *, int *); int __sys_getsockname(int, struct sockaddr *, int *); int __sys_getsockopt(int, int, int, void *, int *); int __sys_listen(int, int); int __sys_setsockopt(int, int, int, const void *, int); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t __sys_recvmsg(int, struct msghdr *, int); ssize_t __sys_send(int, const void *, size_t, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fchown(int, uid_t, gid_t); int __sys_fork(void); int __sys_fsync(int); int __sys_pipe(int *); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); long __sys_fpathconf(int, int); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef 
_SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ Index: head/lib/libpthread/thread/thr_sig.c =================================================================== --- head/lib/libpthread/thread/thr_sig.c (revision 72373) +++ head/lib/libpthread/thread/thr_sig.c (revision 72374) @@ -1,1121 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); pthread_t pthread, pthread_h; void *stackp; int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); if (_thread_kern_in_sched != 0) in_sched = 1; else { stackp = (void *)GET_STACK_UC(ucp); if ((stackp >= _thread_kern_sched_stack) && (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) in_sched = 1; } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. */ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(curthread, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. 
*/ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. 
* Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. */ thread_sig_savecontext(curthread, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ if ((pthread == curthread) || ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority))) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); /* XXX - Save FP registers too? */ FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). 
*/ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. 
*/ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. 
It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. 
*/ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { struct pthread *curthread = _get_curthread(); int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_JOIN: /* Only set the interrupted flag for PS_JOIN: */ pthread->interrupted = 1; /* FALLTHROUGH */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. 
*/ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != curthread) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. 
*/ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && 
sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == curthread) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; - case PS_FILE_WAIT: - _flockfile_backout(thread); - psf->saved_state.psd_state = PS_RUNNING; - break; - case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_JOIN: _join_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { DBG_MSG("_thread_sig_wrapper: Calling signal handler for " "thread 0x%p\n", thread); /* * Dispatch the signal via the custom signal * handler: */ if (psf->sig_has_args == 0) (*(sigfunc))(psf->signo, NULL, NULL); else if ((_thread_sigact[psf->signo - 1].sa_flags & SA_SIGINFO) != 0) (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); else (*(sigfunc))(psf->signo, (siginfo_t *)psf->siginfo.si_code, &psf->uc); } /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(pthread_t thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: stackp 
= GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->ctxtype = CTX_JB_NOSIG; thread->longjmp_val = 1; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { thread->ctxtype = psf->ctxtype; memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = 
psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->longjmp_val = psf->saved_state.psd_longjmp_val; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { psf->ctxtype = thread->ctxtype; memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_longjmp_val = thread->longjmp_val; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; }