Index: head/lib/libc_r/uthread/pthread_private.h
===================================================================
--- head/lib/libc_r/uthread/pthread_private.h	(revision 68515)
+++ head/lib/libc_r/uthread/pthread_private.h	(revision 68516)
@@ -1,1450 +1,1457 @@
/*
 * Copyright (c) 1995-1998 John Birrell.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Private thread definitions for the uthread kernel.
 *
 * $FreeBSD$
 */

#ifndef _PTHREAD_PRIVATE_H
#define _PTHREAD_PRIVATE_H

/*
 * Evaluate the storage class specifier.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#else
#define SCLASS extern
#endif

/*
 * Include files.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Define machine dependent macros to get and set the stack pointer
 * from the supported contexts.  Also define a macro to set the return
 * address in a jmp_buf context.
 *
 * XXX - These need to be moved into architecture dependent support files.
 */
#if defined(__i386__)
#define	GET_STACK_JB(jb)	((unsigned long)((jb)[0]._jb[2]))
#define	GET_STACK_SJB(sjb)	((unsigned long)((sjb)[0]._sjb[2]))
#define	GET_STACK_UC(ucp)	((unsigned long)((ucp)->uc_mcontext.mc_esp))
#define	SET_STACK_JB(jb, stk)	(jb)[0]._jb[2] = (int)(stk)
#define	SET_STACK_SJB(sjb, stk)	(sjb)[0]._sjb[2] = (int)(stk)
#define	SET_STACK_UC(ucp, stk)	(ucp)->uc_mcontext.mc_esp = (int)(stk)
#define	FP_SAVE_UC(ucp)		do {			\
	char	*fdata;					\
	fdata = (char *) (ucp)->uc_mcontext.mc_fpregs;	\
	__asm__("fnsave %0": :"m"(*fdata));		\
} while (0)
#define	FP_RESTORE_UC(ucp)	do {			\
	char	*fdata;					\
	fdata = (char *) (ucp)->uc_mcontext.mc_fpregs;	\
	__asm__("frstor %0": :"m"(*fdata));		\
} while (0)
#define	SET_RETURN_ADDR_JB(jb, ra)	(jb)[0]._jb[0] = (int)(ra)

#elif defined(__alpha__)
#include <machine/reg.h>

#define	GET_STACK_JB(jb)	((unsigned long)((jb)[0]._jb[R_SP + 4]))
#define	GET_STACK_SJB(sjb)	((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
#define	GET_STACK_UC(ucp)	((ucp)->uc_mcontext.mc_regs[R_SP])
#define	SET_STACK_JB(jb, stk)	(jb)[0]._jb[R_SP + 4] = (long)(stk)
#define	SET_STACK_SJB(sjb, stk)	(sjb)[0]._sjb[R_SP + 4] = (long)(stk)
#define	SET_STACK_UC(ucp, stk)	(ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
#define	FP_SAVE_UC(ucp)
#define	FP_RESTORE_UC(ucp)
#define	SET_RETURN_ADDR_JB(jb, ra) do {		\
	(jb)[0]._jb[2] = (long)(ra);		\
	(jb)[0]._jb[R_RA + 4] = 0;		\
	(jb)[0]._jb[R_T12 + 4] = (long)(ra);	\
} while (0)
#else
#error "Don't recognize this architecture!"
#endif

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)	_thread_exit(__FILE__,__LINE__,string)

/* Output debug messages like this: */
#define stdout_debug(args...)	do {		\
	char buf[128];				\
	snprintf(buf, sizeof(buf), ##args);	\
	_thread_sys_write(1, buf, strlen(buf));	\
} while (0)
#define stderr_debug(args...)	do {		\
	char buf[128];				\
	snprintf(buf, sizeof(buf), ##args);	\
	_thread_sys_write(2, buf, strlen(buf));	\
} while (0)

/*
 * Priority queue manipulation macros (using pqe link):
 */
#define PTHREAD_PRIOQ_INSERT_HEAD(thrd)	_pq_insert_head(&_readyq,thrd)
#define PTHREAD_PRIOQ_INSERT_TAIL(thrd)	_pq_insert_tail(&_readyq,thrd)
#define PTHREAD_PRIOQ_REMOVE(thrd)	_pq_remove(&_readyq,thrd)
#define PTHREAD_PRIOQ_FIRST()		_pq_first(&_readyq)

/*
 * Waiting queue manipulation macros (using pqe link):
 */
#define PTHREAD_WAITQ_REMOVE(thrd)	_waitq_remove(thrd)
#define PTHREAD_WAITQ_INSERT(thrd)	_waitq_insert(thrd)

#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_CLEARACTIVE()	_waitq_clearactive()
#define PTHREAD_WAITQ_SETACTIVE()	_waitq_setactive()
#else
#define PTHREAD_WAITQ_CLEARACTIVE()
#define PTHREAD_WAITQ_SETACTIVE()
#endif

/*
 * Work queue manipulation macros (using qe link):
 */
#define PTHREAD_WORKQ_INSERT(thrd) do {			\
	TAILQ_INSERT_TAIL(&_workq,thrd,qe);		\
	(thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ;	\
} while (0)
#define PTHREAD_WORKQ_REMOVE(thrd) do {			\
	TAILQ_REMOVE(&_workq,thrd,qe);			\
	(thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ;	\
} while (0)

/*
 * State change macro without scheduling queue change:
 */
#define PTHREAD_SET_STATE(thrd, newstate) do {	\
	(thrd)->state = newstate;		\
	(thrd)->fname = __FILE__;		\
	(thrd)->lineno = __LINE__;		\
} while (0)

/*
 * State change macro with scheduling queue change - This must be
 * called with preemption deferred (see thread_kern_sched_[un]defer).
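 *
 * (A hedged usage sketch, not part of the original header: callers in
 * this library bracket the state change with the signal-deferral
 * routines declared later in this file, e.g.:
 *
 *	_thread_kern_sig_defer();
 *	PTHREAD_NEW_STATE(pthread, PS_RUNNING);
 *	_thread_kern_sig_undefer();
 *
 * so that the scheduling signal handler can never observe the ready
 * and waiting queues mid-update.)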
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. */ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; /* * Condition variable definitions. 
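 *
 * (Background sketch, not part of the committed header: callers are
 * expected to re-check their predicate around pthread_cond_wait(),
 * since a wait can return without the condition holding.  With a
 * hypothetical mutex m, condvar cv and flag predicate:
 *
 *	pthread_mutex_lock(&m);
 *	while (!predicate)
 *		pthread_cond_wait(&cv, &m);
 *	pthread_mutex_unlock(&m);
 *
 * The c_seqno field added below gives the implementation a similar
 * test of its own: it can tell a genuine signal/broadcast wakeup apart
 * from a wakeup that merely ran a signal handler.)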
*/ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; + int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ - 0, _SPINLOCK_INITIALIZER } + 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 /* * Additional state for a thread suspended with pthread_suspend_np(). */ enum pthread_susp { SUSP_NO, /* Not suspended. */ SUSP_YES, /* Suspended. */ SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ SUSP_COND_WAIT /* Suspended, still in a condition queue. */ }; /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of red zone at the end of each stack. In actuality, this "red zone" is * merely an unmapped region, except in the case of the initial stack. Since * mmap() makes it possible to specify the maximum growth of a MAP_STACK region, * an unmapped gap between thread stacks achieves the same effect as explicitly * mapped red zones. */ #define PTHREAD_STACK_GUARD PAGE_SIZE /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 +/* Size of the scheduler stack: */ +#define SCHED_STACK_SIZE PAGE_SIZE + /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
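 *
 * (Worked example, not from the original source: with the values
 * defined below, a thread at base priority 15 normally runs at 15;
 * while the library delivers a signal to it, it can be run at
 * 15 + PTHREAD_SIGNAL_PRIORITY = 47, and PTHREAD_BASE_PRIORITY(47) =
 * 47 & 0x1F = 15 masks the boost back off.)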
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_key { spinlock_t lock; volatile int allocated; volatile int count; void (*destructor) (); }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. 
*/ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); +struct pthread_signal_frame; + struct pthread_state_data { - int psd_interrupted; + struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; - enum pthread_state psd_state; - int psd_flags; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; + enum pthread_state psd_state; + int psd_flags; + int psd_interrupted; + int psd_longjmp_val; + int psd_sigmask_seqno; + int psd_signo; + int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; /* * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(), * but they may also be sigjmp_buf and ucontext_t. When a thread is * interrupted by a signal, it's context is saved as a ucontext_t. An * application is also free to use [_]longjmp()/[_]siglongjmp() to jump * between contexts within the same thread. Future support will also * include setcontext()/getcontext(). * * Define an enumerated type that can identify the 4 different context * types. */ typedef enum { CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */ CTX_JB, /* context is jmp_buf (with saved sigset) */ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */ CTX_UC /* context is ucontext_t (with saved sigset) */ } thread_context_t; /* * There are 2 basic contexts that a frame may contain at any * one time: * * o ctx - The context that the thread should return to after normal * completion of the signal handler. * o sig_jb - The context just before the signal handler is invoked. * Attempts at abnormal returns from user supplied signal handlers * will return back to the signal context to perform any necessary * cleanup. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; - /* Beginning (bottom) of threads stack frame for this signal. */ - unsigned long stackp; - /* * Threads return context; ctxtype identifies the type of context. * For signal frame 0, these point to the context storage area * within the pthread structure. When handling signals (frame > 0), * these point to a context storage area that is allocated off the * threads stack. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; - - /* Threads "jump out of signal handler" destination frame. */ - int dst_frame; - - /* - * Used to return back to the signal handling frame in case - * the application tries to change contexts from the handler. - */ - jmp_buf *sig_jb; - int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ + ucontext_t uc; + siginfo_t siginfo; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. 
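	 *
	 * (Aside, not in the committed source, on the ctx/ctxtype members
	 * added below: a rough sketch of how the scheduler might resume a
	 * saved context, using only declarations from this file.  None of
	 * these calls return:
	 *
	 *	switch (thread->ctxtype) {
	 *	case CTX_JB_NOSIG:
	 *	case CTX_JB:
	 *		__longjmp(thread->ctx.jb, thread->longjmp_val);
	 *	case CTX_SJB:
	 *		__siglongjmp(thread->ctx.sigjb, thread->longjmp_val);
	 *	case CTX_UC:
	 *		_thread_sys_sigreturn(&thread->ctx.uc);
	 *	}
	 *
	 * The real dispatch, including restoring the saved signal mask in
	 * the CTX_JB_NOSIG case, lives in the scheduler.)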
*/ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* - * Used for tracking delivery of nested signal handlers. - * Signal frame 0 is used for normal context (when no - * signal handlers are active for the thread). Frame - * 1 is used as the context for the first signal, and - * frames 2 .. NSIG-1 are used when additional signals - * arrive interrupting already active signal handlers. + * Threads return context; ctxtype identifies the type of context. */ - struct pthread_signal_frame *sigframes[NSIG]; - struct pthread_signal_frame sigframe0; + union { + jmp_buf jb; + sigjmp_buf sigjb; + ucontext_t uc; + } ctx; + thread_context_t ctxtype; + int longjmp_val; + + /* + * Used for tracking delivery of signal handlers. + */ struct pthread_signal_frame *curframe; - int sigframe_count; - int sigframe_done; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; enum pthread_susp suspended; thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; + int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* Join queue head and link for waiting threads: */ TAILQ_HEAD(join_head, pthread) join_queue; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for another thread to terminate * (the join queue above) * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex, condition variable, or join queue when it is * not being called from a signal handler. If a thread is a * member of one of these queues when a signal handler is invoked, * it must remain in the queue. For this reason, the links for * these queues must not be (re)used for other queues. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. 
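	 *
	 * (Illustrative sketch only, not the actual wrapper code; with
	 * hypothetical locals data, fd, nfds, readfds and writefds, a
	 * select() call can be mapped onto poll() roughly as:
	 *
	 *	if (FD_ISSET(fd, readfds)) {
	 *		data->fds[nfds].fd = fd;
	 *		data->fds[nfds].events |= POLLIN;
	 *	}
	 *	if (FD_ISSET(fd, writefds))
	 *		data->fds[nfds].events |= POLLOUT;
	 *
	 * with data->nfds tracking how many entries are in use.)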
*/ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. */ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ #define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */ #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; const void **specific_data; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* Spare thread stack. */ struct stack { SLIST_ENTRY(stack) qe; /* Queue entry for this stack. */ }; /* * Global variables for the uthread kernel. */ /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). 
*/ SCLASS struct pthread * volatile _thread_single #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. */ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. */ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. */ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. 
*/ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; -SCLASS sigset_t _process_sigmask; +SCLASS sigset_t _process_sigmask +#ifdef GLOBAL_PTHREAD_PRIVATE += { {0, 0, 0, 0} } +#endif +; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. */ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Spare stack queue. Stacks of default size are cached in order to reduce * thread creation time. Spare stacks are used in LIFO order to increase cache * locality. */ SCLASS SLIST_HEAD(, stack) _stackq; /* * Base address of next unallocated default-size {stack, red zone}. Stacks are * allocated contiguously, starting below the bottom of the main stack. When a * new stack is created, a red zone is created (actually, the red zone is simply * left unmapped) below the bottom of the stack, such that the stack will not be * able to grow all the way to the top of the next stack. This isn't * fool-proof. It is possible for a stack to grow by a large amount, such that * it grows into the next stack, and as long as the memory within the red zone * is never accessed, nothing will prevent one thread stack from trouncing all * over the next. */ SCLASS void * _next_stack #ifdef GLOBAL_PTHREAD_PRIVATE /* main stack top - main stack size - stack size - (red zone + main stack red zone) */ = (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD) #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
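 *
 * (Worked example for the _next_stack initializer above, not from the
 * original source, assuming PTHREAD_STACK_INITIAL = 0x100000,
 * PTHREAD_STACK_DEFAULT = 0x10000 and PTHREAD_STACK_GUARD = PAGE_SIZE
 * = 0x1000:
 *
 *	[USRSTACK - 0x100000, USRSTACK)			initial stack
 *	[USRSTACK - 0x101000, USRSTACK - 0x100000)	its red zone
 *	[USRSTACK - 0x111000, USRSTACK - 0x101000)	first thread stack
 *	[USRSTACK - 0x112000, USRSTACK - 0x111000)	its red zone
 *
 * so _next_stack starts at USRSTACK - 0x112000 and steps down by
 * 0x11000 for each new default-size stack.)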
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); void _flockfile_backout(pthread_t); void _funlock_owned(pthread_t); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); -void _thread_exit_finish(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); -void _thread_kern_sched_frame(int frame); +void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); -int _thread_sigframe_find(pthread_t pthread, void *stackp); +void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *); int _thread_sys_sigpending(sigset_t *); int _thread_sys_sigprocmask(int, const sigset_t *, sigset_t *); int _thread_sys_sigsuspend(const sigset_t *); int _thread_sys_siginterrupt(int, int); int _thread_sys_sigpause(int); int _thread_sys_sigreturn(ucontext_t *); +int _thread_sys_sigaltstack(const struct sigaltstack *, struct sigstack *); int _thread_sys_sigstack(const struct sigstack *, struct sigstack *); int _thread_sys_sigvec(int, struct sigvec *, struct sigvec *); void _thread_sys_psignal(unsigned int, const char *); void (*_thread_sys_signal(int, void 
(*)(int)))(int); /* #include */ #ifdef _SYS_STAT_H_ int _thread_sys_fchmod(int, mode_t); int _thread_sys_fstat(int, struct stat *); int _thread_sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int _thread_sys_fstatfs(int, struct statfs *); #endif int _thread_sys_pipe(int *); /* #include */ #ifdef _SYS_SOCKET_H_ int _thread_sys_accept(int, struct sockaddr *, int *); int _thread_sys_bind(int, const struct sockaddr *, int); int _thread_sys_connect(int, const struct sockaddr *, int); int _thread_sys_getpeername(int, struct sockaddr *, int *); int _thread_sys_getsockname(int, struct sockaddr *, int *); int _thread_sys_getsockopt(int, int, int, void *, int *); int _thread_sys_listen(int, int); int _thread_sys_setsockopt(int, int, int, const void *, int); int _thread_sys_shutdown(int, int); int _thread_sys_socket(int, int, int); int _thread_sys_socketpair(int, int, int, int *); ssize_t _thread_sys_recv(int, void *, size_t, int); ssize_t _thread_sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t _thread_sys_recvmsg(int, struct msghdr *, int); ssize_t _thread_sys_send(int, const void *, size_t, int); ssize_t _thread_sys_sendmsg(int, const struct msghdr *, int); ssize_t _thread_sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _STDIO_H_ FILE *_thread_sys_fdopen(int, const char *); FILE *_thread_sys_fopen(const char *, const char *); FILE *_thread_sys_freopen(const char *, const char *, FILE *); FILE *_thread_sys_popen(const char *, const char *); FILE *_thread_sys_tmpfile(void); char *_thread_sys_ctermid(char *); char *_thread_sys_cuserid(char *); char *_thread_sys_fgetln(FILE *, size_t *); char *_thread_sys_fgets(char *, int, FILE *); char *_thread_sys_gets(char *); char *_thread_sys_tempnam(const char *, const char *); char *_thread_sys_tmpnam(char *); int _thread_sys_fclose(FILE *); int _thread_sys_feof(FILE *); int _thread_sys_ferror(FILE *); int _thread_sys_fflush(FILE *); int _thread_sys_fgetc(FILE *); int _thread_sys_fgetpos(FILE *, fpos_t *); int _thread_sys_fileno(FILE *); int _thread_sys_fprintf(FILE *, const char *, ...); int _thread_sys_fpurge(FILE *); int _thread_sys_fputc(int, FILE *); int _thread_sys_fputs(const char *, FILE *); int _thread_sys_fscanf(FILE *, const char *, ...); int _thread_sys_fseek(FILE *, long, int); int _thread_sys_fsetpos(FILE *, const fpos_t *); int _thread_sys_getc(FILE *); int _thread_sys_getchar(void); int _thread_sys_getw(FILE *); int _thread_sys_pclose(FILE *); int _thread_sys_printf(const char *, ...); int _thread_sys_putc(int, FILE *); int _thread_sys_putchar(int); int _thread_sys_puts(const char *); int _thread_sys_putw(int, FILE *); int _thread_sys_remove(const char *); int _thread_sys_rename (const char *, const char *); int _thread_sys_scanf(const char *, ...); int _thread_sys_setlinebuf(FILE *); int _thread_sys_setvbuf(FILE *, char *, int, size_t); int _thread_sys_snprintf(char *, size_t, const char *, ...); int _thread_sys_sprintf(char *, const char *, ...); int _thread_sys_sscanf(const char *, const char *, ...); int _thread_sys_ungetc(int, FILE *); int _thread_sys_vfprintf(FILE *, const char *, _BSD_VA_LIST_); int _thread_sys_vprintf(const char *, _BSD_VA_LIST_); int _thread_sys_vscanf(const char *, _BSD_VA_LIST_); int _thread_sys_vsnprintf(char *, size_t, const char *, _BSD_VA_LIST_); int _thread_sys_vsprintf(char *, const char *, _BSD_VA_LIST_); int _thread_sys_vsscanf(const char *, const char *, _BSD_VA_LIST_); long _thread_sys_ftell(FILE *); size_t 
_thread_sys_fread(void *, size_t, size_t, FILE *);
size_t	_thread_sys_fwrite(const void *, size_t, size_t, FILE *);
void	_thread_sys_clearerr(FILE *);
void	_thread_sys_perror(const char *);
void	_thread_sys_rewind(FILE *);
void	_thread_sys_setbuf(FILE *, char *);
void	_thread_sys_setbuffer(FILE *, char *, int);
#endif

/* #include <unistd.h> */
#ifdef  _UNISTD_H_
char	*_thread_sys_ttyname(int);
int	_thread_sys_close(int);
int	_thread_sys_dup(int);
int	_thread_sys_dup2(int, int);
int	_thread_sys_exect(const char *, char * const *, char * const *);
int	_thread_sys_execve(const char *, char * const *, char * const *);
int	_thread_sys_fchdir(int);
int	_thread_sys_fchown(int, uid_t, gid_t);
int	_thread_sys_fsync(int);
int	_thread_sys_ftruncate(int, off_t);
int	_thread_sys_pause(void);
int	_thread_sys_pipe(int *);
int	_thread_sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
long	_thread_sys_fpathconf(int, int);
off_t	_thread_sys_lseek(int, off_t, int);
pid_t	_thread_sys_fork(void);
pid_t	_thread_sys_tcgetpgrp(int);
ssize_t	_thread_sys_read(int, void *, size_t);
ssize_t	_thread_sys_write(int, const void *, size_t);
void	_thread_sys__exit(int);
#endif

/* #include <fcntl.h> */
#ifdef  _SYS_FCNTL_H_
int	_thread_sys_creat(const char *, mode_t);
int	_thread_sys_fcntl(int, int, ...);
int	_thread_sys_flock(int, int);
int	_thread_sys_open(const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef  _SYS_IOCTL_H_
int	_thread_sys_ioctl(int, unsigned long, ...);
#endif

/* #include <dirent.h> */
#ifdef  _DIRENT_H_
DIR	*___thread_sys_opendir2(const char *, int);
DIR	*_thread_sys_opendir(const char *);
int	_thread_sys_alphasort(const void *, const void *);
int	_thread_sys_scandir(const char *, struct dirent ***,
	    int (*)(struct dirent *), int (*)(const void *, const void *));
int	_thread_sys_closedir(DIR *);
int	_thread_sys_getdirentries(int, char *, int, long *);
long	_thread_sys_telldir(const DIR *);
struct	dirent *_thread_sys_readdir(DIR *);
void	_thread_sys_rewinddir(DIR *);
void	_thread_sys_seekdir(DIR *, long);
#endif

/* #include <sys/uio.h> */
#ifdef  _SYS_UIO_H_
ssize_t	_thread_sys_readv(int, const struct iovec *, int);
ssize_t	_thread_sys_writev(int, const struct iovec *, int);
#endif

/* #include <sys/wait.h> */
#ifdef  WNOHANG
pid_t	_thread_sys_wait(int *);
pid_t	_thread_sys_waitpid(pid_t, int *, int);
pid_t	_thread_sys_wait3(int *, int, struct rusage *);
pid_t	_thread_sys_wait4(pid_t, int *, int, struct rusage *);
#endif

/* #include <poll.h> */
#ifdef  _SYS_POLL_H_
int	_thread_sys_poll(struct pollfd *, unsigned, int);
#endif

/* #include <sys/mman.h> */
#ifdef  _SYS_MMAN_H_
int	_thread_sys_msync(void *, size_t, int);
#endif

/* #include <setjmp.h> */
#ifdef  _SETJMP_H_
extern void	__siglongjmp(sigjmp_buf, int) __dead2;
extern void	__longjmp(jmp_buf, int) __dead2;
extern void	___longjmp(jmp_buf, int) __dead2;
#endif

__END_DECLS
#endif  /* !_PTHREAD_PRIVATE_H */

Index: head/lib/libc_r/uthread/uthread_cond.c
===================================================================
--- head/lib/libc_r/uthread/uthread_cond.c	(revision 68515)
+++ head/lib/libc_r/uthread/uthread_cond.c	(revision 68516)
@@ -1,696 +1,755 @@
/*
 * Copyright (c) 1995 John Birrell.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); /* Reinitialize a condition variable to defaults. */ int -_cond_reinit(pthread_cond_t * cond) +_cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; + (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int -pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) +pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; + pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int -pthread_cond_destroy(pthread_cond_t * cond) +pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int -pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) +pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (cond == NULL) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ - else if (*cond != NULL || - (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && + (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
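		 *
		 * (Editorial aside, not in the committed source: the
		 * enclosing do/while uses c_seqno roughly as
		 *
		 *	seqno = (*cond)->c_seqno;	before sleeping
		 *	...
		 *	done = (seqno != (*cond)->c_seqno);
		 *
		 * so an iteration that woke only to run a signal handler,
		 * with no intervening pthread_cond_signal() or broadcast,
		 * requeues the thread and waits again.)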
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Wait forever: */ _thread_run->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); - if (_thread_run->interrupted != 0) { - /* - * Remember that this thread - * was interrupted: - */ - interrupted = 1; + done = (seqno != (*cond)->c_seqno); + if ((_thread_run->flags & + PTHREAD_FLAGS_IN_CONDQ) != 0) { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); } /* + * Save the interrupted flag; locking + * the mutex will destroy it. + */ + interrupted = _thread_run->interrupted; + + /* * Note that even though this thread may have * been canceled, POSIX requires that the mutex * be reaquired prior to cancellation. */ rval = _mutex_cv_lock(mutex); } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization. 
*/ - else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ _thread_run->wakeup_time.tv_sec = abstime->tv_sec; _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); + done = (seqno != (*cond)->c_seqno); + /* - * Check if the wait timedout or was - * interrupted (canceled): + * Check if the wait timedout, was + * interrupted (canceled), or needs to + * be resumed after handling a signal. 
*/ if ((_thread_run->timeout == 0) && - (_thread_run->interrupted == 0)) { + (_thread_run->interrupted == 0) && + (done != 0)) { /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); - } else { - /* - * Remember if this thread was - * interrupted: - */ - interrupted = _thread_run->interrupted; - - /* Lock the condition variable structure: */ + /* Lock the CV structure: */ _SPINLOCK(&(*cond)->lock); /* * The wait timed out; remove * the thread from the condition - * variable queue: + * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; - /* Unock the condition variable structure: */ + /* Unock the CV structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return a timeout error: */ - rval = ETIMEDOUT; + if (_thread_run->timeout != 0) + rval = ETIMEDOUT; + /* + * Save the interrupted flag; + * locking the mutex will + * destroy it. + */ + interrupted = _thread_run->interrupted; /* * Lock the mutex and ignore any * errors. Note that even though * this thread may have been * canceled, POSIX requires that * the mutex be reaquired prior * to cancellation. */ (void)_mutex_cv_lock(mutex); } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL) == 0)) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: + /* Increment the sequence number: */ + (*cond)->c_seqno++; + if ((pthread = cond_queue_deq(*cond)) != NULL) { /* * Unless the thread is currently suspended, * allow it to run. If the thread is suspended, * make a note that the thread isn't in a wait * queue any more. */ if (pthread->state != PS_SUSPENDED) PTHREAD_NEW_STATE(pthread,PS_RUNNING); else pthread->suspended = SUSP_NOWAIT; } /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } int pthread_cond_broadcast(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. 
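	 *
	 * (Editorial aside, not in the committed source: as in
	 * pthread_cond_signal() above, the code below increments c_seqno
	 * before waking any threads, so a waiter comparing its saved
	 * sequence number against c_seqno (say, 5 against 6) can tell
	 * that a real signal or broadcast occurred while it was out of
	 * the queue servicing a signal handler.)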
*/ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL) == 0)) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: + /* Increment the sequence number: */ + (*cond)->c_seqno++; + /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* * Unless the thread is currently suspended, * allow it to run. If the thread is suspended, * make a note that the thread isn't in a wait * queue any more. */ if (pthread->state != PS_SUSPENDED) PTHREAD_NEW_STATE(pthread,PS_RUNNING); else pthread->suspended = SUSP_NOWAIT; } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. */ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. 
*/ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } #endif Index: head/lib/libc_r/uthread/uthread_create.c =================================================================== --- head/lib/libc_r/uthread/uthread_create.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_create.c (revision 68516) @@ -1,324 +1,327 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
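The condition-variable rework above is what makes the new c_seqno increments in pthread_cond_signal() and pthread_cond_broadcast() meaningful: a waiter can be woken without the wakeup being "done" (a timeout, or an interruption to run a signal handler), so pthread_cond_timedwait() now loops until the wakeup is genuine. The caller-visible consequence is the usual rule that a predicate must be re-tested around every wait. A minimal sketch of that idiom in application code, using illustrative names (lock, cv, ready) that are not part of this patch:

#include <pthread.h>

/* Hypothetical application state; the names are illustrative only. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready = 0;

void
wait_until_ready(void)
{
	pthread_mutex_lock(&lock);
	/*
	 * Re-test the predicate after every wakeup; the wait can
	 * return even though no matching signal arrived.
	 */
	while (ready == 0)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
}

void
mark_ready(void)
{
	pthread_mutex_lock(&lock);
	ready = 1;
	pthread_mutex_unlock(&lock);
	/* This is the path that bumps c_seqno in the implementation. */
	pthread_cond_signal(&cv);
}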
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #include "libc_private.h" static u_int64_t next_uniqueid = 1; #define OFF(f) offsetof(struct pthread, f) -#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f) int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); -int _thread_curframe_offset = OFF(curframe); -int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx); -int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype); +int _thread_ctxtype_offset = OFF(ctxtype); +int _thread_ctx_offset = OFF(ctx); #undef OFF -#undef SIGFRAME_OFF int _thread_PS_RUNNING_value = PS_RUNNING; int _thread_PS_DEAD_value = PS_DEAD; int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG; int _thread_CTX_JB_value = CTX_JB; int _thread_CTX_SJB_value = CTX_SJB; int _thread_CTX_UC_value = CTX_UC; -int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame); int pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { + struct itimerval itimer; int f_gc = 0; int ret = 0; pthread_t gc_thread; pthread_t new_thread; pthread_attr_t pattr; void *stack; /* * Locking functions in libc are required when there are * threads other than the initial thread. */ __isthreaded = 1; /* Allocate memory for the thread structure: */ if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) { /* Use the default thread attributes: */ pattr = &pthread_attr_default; } else { pattr = *attr; } /* Check if a stack was specified in the thread attributes: */ if ((stack = pattr->stackaddr_attr) != NULL) { } /* Allocate memory for a default-size stack: */ else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) { struct stack *spare_stack; /* Allocate or re-use a default-size stack. */ /* * Use the garbage collector mutex for synchronization * of the spare stack list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) { /* Use the spare stack. */ SLIST_REMOVE_HEAD(&_stackq, qe); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); stack = sizeof(struct stack) + (void *) spare_stack - PTHREAD_STACK_DEFAULT; } else { /* Allocate a new stack. */ stack = _next_stack + PTHREAD_STACK_GUARD; - + /* * Even if stack allocation fails, we don't want * to try to use this location again, so * unconditionally decrement _next_stack. Under * normal operating conditions, the most likely * reason for an mmap() error is a stack * overflow of the adjacent thread stack. */ _next_stack -= (PTHREAD_STACK_DEFAULT + PTHREAD_STACK_GUARD); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Stack: */ if (mmap(stack, PTHREAD_STACK_DEFAULT, PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED) { ret = EAGAIN; free(new_thread); } } } /* * The user wants a stack of a particular size. Let's hope they * really know what they want, and simply malloc the stack. 
*/ else if ((stack = (void *) malloc(pattr->stacksize_attr)) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; free(new_thread); } /* Check for errors: */ if (ret != 0) { } else { /* Initialise the thread structure: */ memset(new_thread, 0, sizeof(struct pthread)); new_thread->slice_usec = -1; new_thread->stack = stack; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = PTHREAD_MAGIC; /* Initialise the thread for signals: */ new_thread->sigmask = _thread_run->sigmask; + new_thread->sigmask_seqno = 0; - /* Initialize the first signal frame: */ - new_thread->sigframes[0] = &new_thread->sigframe0; - new_thread->curframe = &new_thread->sigframe0; + /* Initialize the signal frame: */ + new_thread->curframe = NULL; /* Initialise the jump buffer: */ - _setjmp(new_thread->curframe->ctx.jb); + _setjmp(new_thread->ctx.jb); /* * Set up new stack frame so that it looks like it * returned from a longjmp() to the beginning of * _thread_start(). */ - SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb, - _thread_start); + SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start); /* The stack starts high and builds down: */ - SET_STACK_JB(new_thread->curframe->ctx.jb, + SET_STACK_JB(new_thread->ctx.jb, (long)new_thread->stack + pattr->stacksize_attr - sizeof(double)); /* Initialize the rest of the frame: */ - new_thread->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - new_thread->curframe->stackp = - GET_STACK_JB(new_thread->curframe->ctx.jb); - new_thread->sigframe_count = 0; + new_thread->ctxtype = CTX_JB_NOSIG; /* Copy the thread attributes: */ memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr)); /* * Check if this thread is to inherit the scheduling - * attributes from its parent: + * attributes from its parent: */ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) { /* Copy the scheduling attributes: */ new_thread->base_priority = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.prio = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.sched_policy = _thread_run->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their - * default values: + * default values: */ new_thread->base_priority = new_thread->attr.prio; } new_thread->active_priority = new_thread->base_priority; new_thread->inherited_priority = 0; /* Initialise the join queue for the new thread: */ TAILQ_INIT(&(new_thread->join_queue)); /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); /* Initialise hooks in the thread structure: */ new_thread->specific_data = NULL; new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->poll_data.nfds = 0; new_thread->poll_data.fds = NULL; new_thread->continuation = NULL; /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* * Initialise the unique id which GDB uses to * track threads. */ new_thread->uniqueid = next_uniqueid++; /* * Check if the garbage collector thread * needs to be started. 
*/ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial); /* Add the thread to the linked list of all threads: */ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle); if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) new_thread->state = PS_SUSPENDED; else { new_thread->state = PS_RUNNING; PTHREAD_PRIOQ_INSERT_TAIL(new_thread); } /* * Undefer and handle pending signals, yielding * if necessary. */ _thread_kern_sig_undefer(); /* Return a pointer to the thread structure: */ (*thread) = new_thread; + if (f_gc != 0) { + /* Install the scheduling timer: */ + itimer.it_interval.tv_sec = 0; + itimer.it_interval.tv_usec = _clock_res_usec; + itimer.it_value = itimer.it_interval; + if (setitimer(_ITIMER_SCHED_TIMER, &itimer, + NULL) != 0) + PANIC("Cannot set interval timer"); + } + /* Schedule the new user thread: */ _thread_kern_sched(NULL); + /* * Start a garbage collector thread * if necessary. */ if (f_gc && pthread_create(&gc_thread,NULL, _thread_gc,NULL) != 0) PANIC("Can't create gc thread"); } } /* Return the status: */ return (ret); } void _thread_start(void) { /* We just left the scheduler via longjmp: */ _thread_kern_in_sched = 0; /* Run the current thread's start routine with argument: */ pthread_exit(_thread_run->start_routine(_thread_run->arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } #endif Index: head/lib/libc_r/uthread/uthread_detach.c =================================================================== --- head/lib/libc_r/uthread/uthread_detach.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_detach.c (revision 68516) @@ -1,83 +1,88 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
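With the change above, the scheduling interval timer is armed on the first real thread creation (when f_gc is set) instead of in _thread_init(), so a program that never creates a thread never takes scheduler interrupts. The call has the shape below; this is only a sketch using the public setitimer() interface, with ITIMER_VIRTUAL and a fixed tick standing in for the library-private _ITIMER_SCHED_TIMER and _clock_res_usec used by the real code:

#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the library-private names. */
#define SCHED_TIMER	ITIMER_VIRTUAL
static long clock_res_usec = 10000;	/* assume a 10 ms clock tick */

static void
install_sched_timer(void)
{
	struct itimerval itimer;

	/* Fire every tick, starting one tick from now: */
	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = clock_res_usec;
	itimer.it_value = itimer.it_interval;
	if (setitimer(SCHED_TIMER, &itimer, NULL) != 0) {
		perror("setitimer");
		exit(1);
	}
}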
* * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_detach(pthread_t pthread) { int rval = 0; pthread_t next_thread; /* Check for invalid calling parameters: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) /* Return an invalid argument error: */ rval = EINVAL; /* Check if the thread has not been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Enter a loop to bring all threads off the join queue: */ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* Make the thread runnable: */ - PTHREAD_NEW_STATE(next_thread,PS_RUNNING); + PTHREAD_NEW_STATE(next_thread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + next_thread->error = ESRCH; } /* * Undefer and handle pending signals, yielding if a * scheduling signal occurred while in the critical region. */ _thread_kern_sig_undefer(); } else /* Return an error: */ rval = EINVAL; /* Return the completion status: */ return (rval); } #endif Index: head/lib/libc_r/uthread/uthread_exit.c =================================================================== --- head/lib/libc_r/uthread/uthread_exit.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_exit.c (revision 68516) @@ -1,244 +1,237 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
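The error = ESRCH assignment in pthread_detach() above pairs with the pthread_join() retry loop later in this commit: a thread blocked in a join is woken and told that its target is no longer joinable. A hypothetical test program showing the behaviour this commit implements (POSIX leaves detaching a thread that others are joining undefined, so this is specific to libc_r):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
target(void *arg)
{
	sleep(2);		/* exit well after the detach */
	return (NULL);
}

static void *
detacher(void *arg)
{
	sleep(1);		/* let main() block in pthread_join() first */
	pthread_detach(*(pthread_t *)arg);
	return (NULL);
}

int
main(void)
{
	pthread_t t, d;
	int err;

	pthread_create(&t, NULL, target, NULL);
	pthread_create(&d, NULL, detacher, &t);
	/* Woken by the detach; ESRCH expected under this commit. */
	err = pthread_join(t, NULL);
	printf("pthread_join: %d\n", err);
	pthread_join(d, NULL);
	return (0);
}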
* * $FreeBSD$ */ #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) void __exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); _thread_sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = _thread_sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ _thread_sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ _thread_sys__exit(status); } __strong_reference(__exit, _exit); void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ strcpy(s, "Fatal error '"); strcat(s, string); strcat(s, "' at line ? "); strcat(s, "in file "); strcat(s, fname); strcat(s, " (errno = ?"); strcat(s, ")\n"); /* Write the string to the standard error file descriptor: */ _thread_sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else _exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. */ void _thread_exit_cleanup(void) { /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the application's responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(_thread_run); /* Unlock all owned file locks: */ _funlock_owned(_thread_run); /* Unlock all private mutexes: */ _mutex_unlock_private(_thread_run); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void pthread_exit(void *status) { - int frame; + pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((_thread_run->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. 
POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run); PANIC(msg); } /* Flag this thread as exiting: */ _thread_run->flags |= PTHREAD_EXITING; /* Save the return value: */ _thread_run->ret = status; while (_thread_run->cleanup != NULL) { pthread_cleanup_pop(1); } - if (_thread_run->attr.cleanup_attr != NULL) { _thread_run->attr.cleanup_attr(_thread_run->attr.arg_attr); } /* Check if there is thread specific data: */ if (_thread_run->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (_thread_run->poll_data.fds != NULL) { free(_thread_run->poll_data.fds); _thread_run->poll_data.fds = NULL; } - if ((frame = _thread_run->sigframe_count) == 0) - _thread_exit_finish(); - else { - /* - * Jump back and unwind the signal frames to gracefully - * cleanup. - */ - ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1); - } - - /* This point should not be reached. */ - PANIC("Dead thread has resumed"); -} - -void -_thread_exit_finish(void) -{ - pthread_t pthread; - /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. */ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + if ((_thread_run->attr.flags & PTHREAD_DETACHED) != 0) + pthread->error = ESRCH; + else { + pthread->ret = _thread_run->ret; + pthread->error = 0; + } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, _thread_run, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); + + /* This point should not be reached. */ + PANIC("Dead thread has resumed"); } #endif Index: head/lib/libc_r/uthread/uthread_info.c =================================================================== --- head/lib/libc_r/uthread/uthread_info.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_info.c (revision 68516) @@ -1,315 +1,290 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" +#ifndef NELEMENTS +#define NELEMENTS(arr) (sizeof(arr) / sizeof(arr[0])) +#endif + +static void dump_thread(int fd, pthread_t pthread, int long_version); + + struct s_thread_info { enum pthread_state state; char *name; }; /* Static variables: */ static const struct s_thread_info thread_info[] = { {PS_RUNNING , "Running"}, {PS_SIGTHREAD , "Waiting on signal thread"}, {PS_MUTEX_WAIT , "Waiting on a mutex"}, {PS_COND_WAIT , "Waiting on a condition variable"}, {PS_FDLR_WAIT , "Waiting for a file read lock"}, {PS_FDLW_WAIT , "Waiting for a file write lock"}, {PS_FDR_WAIT , "Waiting for read"}, {PS_FDW_WAIT , "Waiting for write"}, {PS_FILE_WAIT , "Waiting for FILE lock"}, {PS_POLL_WAIT , "Waiting on poll"}, {PS_SELECT_WAIT , "Waiting on select"}, {PS_SLEEP_WAIT , "Sleeping"}, {PS_WAIT_WAIT , "Waiting process"}, {PS_SIGSUSPEND , "Suspended, waiting for a signal"}, {PS_SIGWAIT , "Waiting for a signal"}, {PS_SPINBLOCK , "Waiting for a spinlock"}, {PS_JOIN , "Waiting to join"}, {PS_SUSPENDED , "Suspended"}, {PS_DEAD , "Dead"}, {PS_DEADLOCK , "Deadlocked"}, {PS_STATE_MAX , "Not a real state!"} }; void _thread_dump_info(void) { char s[512]; int fd; int i; - int j; pthread_t pthread; char tmpfile[128]; pq_list_t *pq_list; - for (i = 0; i < 100000; i++) { + for (i = 0; i < 100000; i++) { snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i", getpid(), i); /* Open the dump file for append and create it if necessary: */ if ((fd = _thread_sys_open(tmpfile, O_RDWR | O_CREAT | O_EXCL, 0666)) < 0) { /* Can't open the dump file. */ if (errno == EEXIST) continue; /* * We need to continue only in the case of an * EEXIST error; most other error codes mean * that we would fail every time. 
*/ return; } else { break; } } if (i==100000) { /* all 100000 possibilities are in use :( */ return; } else { /* Output a header for active threads: */ strcpy(s, "\n\n=============\nACTIVE THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the global list: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); - - /* Check if this is the running thread: */ - if (pthread == _thread_run) { - /* Output a record for the running thread: */ - strcpy(s, "This is the running thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Check if this is the initial thread: */ - if (pthread == _thread_initial) { - /* Output a record for the initial thread: */ - strcpy(s, "This is the initial thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Process according to thread state: */ - switch (pthread->state) { - /* File descriptor read lock wait: */ - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - /* Write the lock details: */ - snprintf(s, sizeof(s), "fd %d[%s:%d]", - pthread->data.fd.fd, - pthread->data.fd.fname, - pthread->data.fd.branch); - _thread_sys_write(fd, s, strlen(s)); - snprintf(s, sizeof(s), "owner %pr/%pw\n", - _thread_fd_table[pthread->data.fd.fd]->r_owner, - _thread_fd_table[pthread->data.fd.fd]->w_owner); - _thread_sys_write(fd, s, strlen(s)); - break; - case PS_SIGWAIT: - snprintf(s, sizeof(s), "sigmask (hi)"); - _thread_sys_write(fd, s, strlen(s)); - for (i = _SIG_WORDS - 1; i >= 0; i--) { - snprintf(s, sizeof(s), "%08x\n", - pthread->sigmask.__bits[i]); - _thread_sys_write(fd, s, strlen(s)); - } - snprintf(s, sizeof(s), "(lo)\n"); - _thread_sys_write(fd, s, strlen(s)); - break; - - /* - * Trap other states that are not explicitly - * coded to dump information: - */ - default: - /* Nothing to do here. */ - break; - } + dump_thread(fd, pthread, /*long_version*/ 1); } /* Output a header for ready threads: */ strcpy(s, "\n\n=============\nREADY THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the ready queue: */ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) { TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? 
- "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for waiting threads: */ strcpy(s, "\n\n=============\nWAITING THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_waitingq, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Output a header for threads in the work queue: */ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_workq, qe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Check if there are no dead threads: */ if (TAILQ_FIRST(&_dead_list) == NULL) { /* Output a record: */ strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n"); _thread_sys_write(fd, s, strlen(s)); } else { /* Output a header for dead threads: */ strcpy(s, "\n\nDEAD THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* * Enter a loop to report each thread in the global - * dead thread list: + * dead thread list: */ TAILQ_FOREACH(pthread, &_dead_list, dle) { - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "Thread %p prio %3d [%s:%d]\n", - pthread, pthread->base_priority, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for file descriptors: */ - snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR TABLE (table size %d)\n\n",_thread_dtablesize); + snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR " + "TABLE (table size %d)\n\n", _thread_dtablesize); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report file descriptor lock usage: */ for (i = 0; i < _thread_dtablesize; i++) { /* * Check if memory is allocated for this file - * descriptor: + * descriptor: */ if (_thread_fd_table[i] != NULL) { /* Report the file descriptor lock status: */ snprintf(s, sizeof(s), - "fd[%3d] read owner %p count %d [%s:%d]\n write owner %p count %d [%s:%d]\n", - i, - _thread_fd_table[i]->r_owner, - _thread_fd_table[i]->r_lockcount, - _thread_fd_table[i]->r_fname, - _thread_fd_table[i]->r_lineno, - _thread_fd_table[i]->w_owner, - _thread_fd_table[i]->w_lockcount, - _thread_fd_table[i]->w_fname, - _thread_fd_table[i]->w_lineno); - _thread_sys_write(fd, s, strlen(s)); + "fd[%3d] read owner %p count %d [%s:%d]\n" + " write owner %p count %d [%s:%d]\n", + i, 
_thread_fd_table[i]->r_owner, + _thread_fd_table[i]->r_lockcount, + _thread_fd_table[i]->r_fname, + _thread_fd_table[i]->r_lineno, + _thread_fd_table[i]->w_owner, + _thread_fd_table[i]->w_lockcount, + _thread_fd_table[i]->w_fname, + _thread_fd_table[i]->w_lineno); + _thread_sys_write(fd, s, strlen(s)); } } /* Close the dump file: */ _thread_sys_close(fd); + } +} + +static void +dump_thread(int fd, pthread_t pthread, int long_version) +{ + char s[512]; + int i; + + /* Find the state: */ + for (i = 0; i < NELEMENTS(thread_info) - 1; i++) + if (thread_info[i].state == pthread->state) + break; + + /* Output a record for the thread: */ + snprintf(s, sizeof(s), + "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", + pthread, (pthread->name == NULL) ? "" : pthread->name, + pthread->active_priority, thread_info[i].name, pthread->fname, + pthread->lineno); + _thread_sys_write(fd, s, strlen(s)); + + if (long_version != 0) { + /* Check if this is the running thread: */ + if (pthread == _thread_run) { + /* Output a record for the running thread: */ + strcpy(s, "This is the running thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Check if this is the initial thread: */ + if (pthread == _thread_initial) { + /* Output a record for the initial thread: */ + strcpy(s, "This is the initial thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Process according to thread state: */ + switch (pthread->state) { + /* File descriptor read lock wait: */ + case PS_FDLR_WAIT: + case PS_FDLW_WAIT: + case PS_FDR_WAIT: + case PS_FDW_WAIT: + /* Write the lock details: */ + snprintf(s, sizeof(s), "fd %d[%s:%d]", + pthread->data.fd.fd, + pthread->data.fd.fname, + pthread->data.fd.branch); + _thread_sys_write(fd, s, strlen(s)); + snprintf(s, sizeof(s), "owner %pr/%pw\n", + _thread_fd_table[pthread->data.fd.fd]->r_owner, + _thread_fd_table[pthread->data.fd.fd]->w_owner); + _thread_sys_write(fd, s, strlen(s)); + break; + case PS_SIGWAIT: + snprintf(s, sizeof(s), "sigmask (hi)"); + _thread_sys_write(fd, s, strlen(s)); + for (i = _SIG_WORDS - 1; i >= 0; i--) { + snprintf(s, sizeof(s), "%08x\n", + pthread->sigmask.__bits[i]); + _thread_sys_write(fd, s, strlen(s)); + } + snprintf(s, sizeof(s), "(lo)\n"); + _thread_sys_write(fd, s, strlen(s)); + break; + /* + * Trap other states that are not explicitly + * coded to dump information: + */ + default: + /* Nothing to do here. */ + break; + } } } /* Set the thread name for debug: */ void pthread_set_name_np(pthread_t thread, char *name) { /* Check if the caller has specified a valid thread: */ if (thread != NULL && thread->magic == PTHREAD_MAGIC) { if (thread->name != NULL) { /* Free space for previous name. */ free(thread->name); } thread->name = strdup(name); } } #endif Index: head/lib/libc_r/uthread/uthread_init.c =================================================================== --- head/lib/libc_r/uthread/uthread_init.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_init.c (revision 68516) @@ -1,425 +1,421 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #ifdef GCC_2_8_MADE_THREAD_AWARE typedef void *** (*dynamic_handler_allocator)(); extern void __set_dynamic_handler_allocator(dynamic_handler_allocator); static pthread_key_t except_head_key; typedef struct { void **__dynamic_handler_chain; void *top_elt[2]; } except_struct; static void ***dynamic_allocator_handler_fn() { except_struct *dh = (except_struct *)pthread_getspecific(except_head_key); if(dh == NULL) { dh = (except_struct *)malloc( sizeof(except_struct) ); memset(dh, '\0', sizeof(except_struct)); dh->__dynamic_handler_chain= dh->top_elt; pthread_setspecific(except_head_key, (void *)dh); } return &dh->__dynamic_handler_chain; } #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; struct clockinfo clockinfo; struct sigaction act; - struct itimerval itimer; + struct sigaltstack alt; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = _thread_sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (_thread_sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); if (_thread_sys_dup2(fd,0) == -1 || _thread_sys_dup2(fd,1) == -1 || _thread_sys_dup2(fd,2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them: */ for (i = 0; i < 3; i++) if (((_pthread_stdio_flags[i] = _thread_sys_fcntl(i,F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); /* * Create a pipe that is written to by the signal handler to prevent - * signals being missed in calls to _select: + * signals being missed in calls to _select: */ if (_thread_sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } /* Get the flags for the read pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel write pipe non-blocking"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so - * abort: + * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ - else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL) + else if ((_thread_kern_sched_stack = malloc(SCHED_STACK_SIZE)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Initialize the thread stack cache: */ SLIST_INIT(&_stackq); /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the parameters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond. 
*/ if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = (void *) USRSTACK - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); - SET_STACK_JB(_thread_kern_sched_jb, - _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double)); + SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + + SCHED_STACK_SIZE - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Initialise the queue: */ TAILQ_INIT(&(_thread_initial->join_queue)); /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; - /* Initialize the initial signal frame: */ - _thread_initial->sigframes[0] = &_thread_initial->sigframe0; - _thread_initial->curframe = &_thread_initial->sigframe0; - _thread_initial->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - _thread_initial->curframe->stackp = (unsigned long) USRSTACK; + /* Initialize the initial context: */ + _thread_initial->curframe = NULL; + _thread_initial->ctxtype = CTX_JB_NOSIG; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; _thread_initial->specific_data = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _thread_run = _thread_initial; /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; - act.sa_flags = SA_SIGINFO; + act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); + /* Create and install an alternate signal stack: */ + alt.ss_sp = malloc(SIGSTKSZ); /* recommended stack size */ + alt.ss_size = SIGSTKSZ; + alt.ss_flags = 0; + if (_thread_sys_sigaltstack(&alt, NULL) != 0) + PANIC("Unable to install alternate signal stack"); + /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (_thread_sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal - * 
initialisation fails: + * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 || _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* - * Abort this process if signal initialisation fails: + * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort - * this process. + * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor - * table: + * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } - - /* Install the scheduling timer: */ - itimer.it_interval.tv_sec = 0; - itimer.it_interval.tv_usec = _clock_res_usec; - itimer.it_value = itimer.it_interval; - if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) - PANIC("Cannot set interval timer"); - } } #ifdef GCC_2_8_MADE_THREAD_AWARE /* Create the thread-specific data for the exception linked list. */ if(pthread_key_create(&except_head_key, NULL) != 0) PANIC("Failed to create thread-specific exception head"); /* Setup the gcc exception handler per thread. */ __set_dynamic_handler_allocator( dynamic_allocator_handler_fn ); #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* Initialise the garbage collector mutex and condition variable. 
*/ if (pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* - * Special start up code for NetBSD/Alpha + * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) -int +int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif #else /* * A stub for non-threaded programs. */ void _thread_init(void) { } #endif Index: head/lib/libc_r/uthread/uthread_jmp.c =================================================================== --- head/lib/libc_r/uthread/uthread_jmp.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_jmp.c (revision 68516) @@ -1,202 +1,111 @@ /* * Copyright (C) 2000 Jason Evans . * All rights reserved. * Copyright (C) 2000 Daniel M. Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" +/* Prototypes: */ +static inline int check_stack(pthread_t thread, void *stackp); + void siglongjmp(sigjmp_buf env, int savemask) { - void *jmp_stackp; - void *stack_begin, *stack_end; - int frame, dst_frame; - - if ((frame = _thread_run->sigframe_count) == 0) - __siglongjmp(env, savemask); - - /* Get the stack pointer from the jump buffer. */ - jmp_stackp = (void *) GET_STACK_SJB(env); - - /* Get the bounds of the current threads stack. */ - PTHREAD_ASSERT(_thread_run->stack != NULL, - "Thread stack pointer is null"); - stack_begin = _thread_run->stack; - stack_end = stack_begin + _thread_run->attr.stacksize_attr; - - /* - * Make sure we aren't jumping to a different stack. Make sure - * jmp_stackp is between stack_begin and stack end, to correctly detect - * this condition regardless of whether the stack grows up or down. 
- */ - if (((jmp_stackp < stack_begin) && (jmp_stackp < stack_end)) || - ((jmp_stackp > stack_begin) && (jmp_stackp > stack_end))) + if (check_stack(_thread_run, (void *) GET_STACK_SJB(env))) PANIC("siglongjmp()ing between thread contexts is undefined by " "POSIX 1003.1"); - if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0) - /* - * The stack pointer was verified above, so this - * shouldn't happen. Let's be anal anyways. - */ - PANIC("Error locating signal frame"); - else if (dst_frame == frame) { - /* - * The stack pointer is somewhere within the current - * frame. Jump to the users context. - */ - __siglongjmp(env, savemask); - } /* - * Copy the users context to the return context of the - * destination frame. + * The stack pointer is somewhere within the threads stack. + * Jump to the users context. */ - memcpy(&_thread_run->sigframes[dst_frame]->ctx.sigjb, env, sizeof(*env)); - _thread_run->sigframes[dst_frame]->ctxtype = CTX_SJB; - _thread_run->sigframes[dst_frame]->longjmp_val = savemask; - _thread_run->curframe->dst_frame = dst_frame; - ___longjmp(*_thread_run->curframe->sig_jb, 1); + __siglongjmp(env, savemask); } void longjmp(jmp_buf env, int val) { - void *jmp_stackp; - void *stack_begin, *stack_end; - int frame, dst_frame; - - if ((frame = _thread_run->sigframe_count) == 0) - __longjmp(env, val); - - /* Get the stack pointer from the jump buffer. */ - jmp_stackp = (void *) GET_STACK_JB(env); - - /* Get the bounds of the current threads stack. */ - PTHREAD_ASSERT(_thread_run->stack != NULL, - "Thread stack pointer is null"); - stack_begin = _thread_run->stack; - stack_end = stack_begin + _thread_run->attr.stacksize_attr; - - /* - * Make sure we aren't jumping to a different stack. Make sure - * jmp_stackp is between stack_begin and stack end, to correctly detect - * this condition regardless of whether the stack grows up or down. - */ - if (((jmp_stackp < stack_begin) && (jmp_stackp < stack_end)) || - ((jmp_stackp > stack_begin) && (jmp_stackp > stack_end))) + if (check_stack(_thread_run, (void *) GET_STACK_JB(env))) PANIC("longjmp()ing between thread contexts is undefined by " "POSIX 1003.1"); - if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0) - /* - * The stack pointer was verified above, so this - * shouldn't happen. Let's be anal anyways. - */ - PANIC("Error locating signal frame"); - else if (dst_frame == frame) { - /* - * The stack pointer is somewhere within the current - * frame. Jump to the users context. - */ - __longjmp(env, val); - } - /* - * Copy the users context to the return context of the - * destination frame. + * The stack pointer is somewhere within the threads stack. + * Jump to the users context. */ - memcpy(&_thread_run->sigframes[dst_frame]->ctx.jb, env, sizeof(*env)); - _thread_run->sigframes[dst_frame]->ctxtype = CTX_JB; - _thread_run->sigframes[dst_frame]->longjmp_val = val; - _thread_run->curframe->dst_frame = dst_frame; - ___longjmp(*_thread_run->curframe->sig_jb, 1); + __longjmp(env, val); } void _longjmp(jmp_buf env, int val) { - void *jmp_stackp; - void *stack_begin, *stack_end; - int frame, dst_frame; + if (check_stack(_thread_run, (void *) GET_STACK_JB(env))) + PANIC("_longjmp()ing between thread contexts is undefined by " + "POSIX 1003.1"); - if ((frame = _thread_run->sigframe_count) == 0) - ___longjmp(env, val); + /* + * The stack pointer is somewhere within the threads stack. + * Jump to the users context. + */ + ___longjmp(env, val); +} - /* Get the stack pointer from the jump buffer. 
*/ - jmp_stackp = (void *) GET_STACK_JB(env); +/* Returns 0 if stack check is OK, non-zero otherwise. */ +static inline int +check_stack(pthread_t thread, void *stackp) +{ + void *stack_begin, *stack_end; /* Get the bounds of the current threads stack. */ - PTHREAD_ASSERT(_thread_run->stack != NULL, + PTHREAD_ASSERT(thread->stack != NULL, "Thread stack pointer is null"); - stack_begin = _thread_run->stack; - stack_end = stack_begin + _thread_run->attr.stacksize_attr; + stack_begin = thread->stack; + stack_end = stack_begin + thread->attr.stacksize_attr; /* * Make sure we aren't jumping to a different stack. Make sure * jmp_stackp is between stack_begin and stack end, to correctly detect * this condition regardless of whether the stack grows up or down. */ - if (((jmp_stackp < stack_begin) && (jmp_stackp < stack_end)) || - ((jmp_stackp > stack_begin) && (jmp_stackp > stack_end))) - PANIC("_longjmp()ing between thread contexts is undefined by " - "POSIX 1003.1"); - - if ((dst_frame = _thread_sigframe_find(_thread_run, jmp_stackp)) < 0) - /* - * The stack pointer was verified above, so this - * shouldn't happen. Let's be anal anyways. - */ - PANIC("Error locating signal frame"); - else if (dst_frame == frame) { - /* - * The stack pointer is somewhere within the current - * frame. Jump to the users context. - */ - ___longjmp(env, val); - } - /* - * Copy the users context to the return context of the - * destination frame. - */ - memcpy(&_thread_run->sigframes[dst_frame]->ctx.jb, env, sizeof(*env)); - _thread_run->sigframes[dst_frame]->ctxtype = CTX_JB_NOSIG; - _thread_run->sigframes[dst_frame]->longjmp_val = val; - _thread_run->curframe->dst_frame = dst_frame; - ___longjmp(*_thread_run->curframe->sig_jb, 1); + if (((stackp < stack_begin) && (stackp < stack_end)) || + ((stackp > stack_begin) && (stackp > stack_end))) + return (1); + else + return (0); } #endif Index: head/lib/libc_r/uthread/uthread_join.c =================================================================== --- head/lib/libc_r/uthread/uthread_join.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_join.c (revision 68516) @@ -1,138 +1,166 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_join(pthread_t pthread, void **thread_return) { int ret = 0; _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) { /* Invalid thread: */ _thread_leave_cancellation_point(); return(EINVAL); } /* Check if the caller has specified itself: */ if (pthread == _thread_run) { /* Avoid a deadlock condition: */ _thread_leave_cancellation_point(); return(EDEADLK); } /* * Find the thread in the list of active threads or in the * list of dead threads: */ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0)) /* Return an error: */ ret = ESRCH; /* Check if this thread has been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) /* Return an error: */ ret = ESRCH; /* Check if the thread is not dead: */ else if (pthread->state != PS_DEAD) { PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run); - /* Clear the interrupted flag: */ - _thread_run->interrupted = 0; - /* - * Protect against being context switched out while - * adding this thread to the join queue. + * Enter a loop in case this thread is woken prematurely + * in order to invoke a signal handler: */ - _thread_kern_sig_defer(); + for (;;) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; - /* Add the running thread to the join queue: */ - TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; - _thread_run->data.thread = pthread; + /* + * Protect against being context switched out while + * adding this thread to the join queue. 
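For contrast with these internals, the caller's-eye view of the join path is plain pthread API. A minimal sketch follows; the worker function and its return value are invented for illustration, and error handling is kept to the essentials.

#include <pthread.h>
#include <stdio.h>

/* The exiting thread's return value is what pthread_join() hands back. */
static void *
worker(void *arg)
{
        return ((void *)42);
}

int
main(void)
{
        pthread_t tid;
        void *result;

        if (pthread_create(&tid, NULL, worker, NULL) != 0)
                return (1);
        /* Blocks (PS_JOIN in this library) until worker exits. */
        if (pthread_join(tid, &result) == 0)
                printf("worker returned %ld\n", (long)result);
        return (0);
}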
+ */ + _thread_kern_sig_defer(); - /* Schedule the next thread: */ - _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); + /* Add the running thread to the join queue: */ + TAILQ_INSERT_TAIL(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; + _thread_run->data.thread = pthread; - if (_thread_run->interrupted != 0) { - TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; - } - _thread_run->data.thread = NULL; + /* Schedule the next thread: */ + _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); - _thread_kern_sig_undefer(); + if ((_thread_run->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { + TAILQ_REMOVE(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; + } + _thread_run->data.thread = NULL; - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation(_thread_run); + _thread_kern_sig_undefer(); - /* Check if the thread is not detached: */ - if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { - /* Check if the return value is required: */ - if (thread_return) - /* Return the thread's return value: */ - *thread_return = pthread->ret; - } - else - /* Return an error: */ - ret = ESRCH; + if (_thread_run->interrupted != 0) { + if (_thread_run->continuation != NULL) + _thread_run->continuation(_thread_run); + /* + * This thread was interrupted, probably to + * invoke a signal handler. Make sure the + * target thread is still joinable. + */ + if (((_find_thread(pthread) != 0) && + (_find_dead_thread(pthread) != 0)) || + ((pthread->attr.flags & + PTHREAD_DETACHED) != 0)) { + /* Return an error: */ + ret = ESRCH; + /* We're done; break out of the loop. */ + break; + } + else if (pthread->state == PS_DEAD) { + /* We're done; break out of the loop. */ + break; + } + } else { + /* + * The thread return value and error are set + * by the thread we're joining to when it + * exits or detaches: + */ + ret = _thread_run->error; + if ((ret == 0) && (thread_return != NULL)) + *thread_return = _thread_run->ret; + + /* We're done; break out of the loop. */ + break; + } + } /* Check if the return value is required: */ } else if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; _thread_leave_cancellation_point(); /* Return the completion status: */ return (ret); } void _join_backout(pthread_t pthread) { _thread_kern_sig_defer(); - if (pthread->state == PS_JOIN) { + if ((pthread->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe); _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; } _thread_kern_sig_undefer(); } #endif Index: head/lib/libc_r/uthread/uthread_kern.c =================================================================== --- head/lib/libc_r/uthread/uthread_kern.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_kern.c (revision 68516) @@ -1,1108 +1,1111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* #define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* Static function prototype definitions: */ -static void +static void thread_kern_poll(int wait_reqd); static void dequeue_signals(void); static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in); /* Static variables: */ static int last_tick = 0; /* * This is called when a signal handler finishes and wants to * return to a previous frame. */ void -_thread_kern_sched_frame(int frame) +_thread_kern_sched_frame(struct pthread_signal_frame *psf) { /* * Flag the pthread kernel as executing scheduler code * to avoid a signal from interrupting this execution and * corrupting the (soon-to-be) current frame. */ _thread_kern_in_sched = 1; - /* Return to the specified frame: */ - _thread_run->curframe = _thread_run->sigframes[frame]; - _thread_run->sigframe_count = frame; + /* Restore the signal frame: */ + _thread_sigframe_restore(_thread_run, psf); - if (_thread_run->sigframe_count == 0) - /* Restore the threads priority: */ - _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; - /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } void _thread_kern_sched(ucontext_t *scp) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* Check if this function was called from the signal handler: */ if (scp != NULL) { /* * The signal handler should have saved the state of * the current thread. Restore the process signal * mask. */ if (_thread_sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL) != 0) PANIC("Unable to restore process mask after signal"); /* * We're running on the signal stack; just call the * kernel scheduler directly. 
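The non-signal branch just below saves the running thread with _setjmp() and later resumes it with a longjmp() variant. A minimal sketch of that save/restore pattern, using only the standard setjmp interface (the scheduler_ctx name is invented):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf scheduler_ctx;

int
main(void)
{
        /*
         * setjmp() returns 0 when the context is first saved, and the
         * value passed to longjmp() when the context is resumed.
         */
        if (setjmp(scheduler_ctx) == 0) {
                printf("context saved; switching\n");
                longjmp(scheduler_ctx, 1);      /* never returns */
        }
        printf("context restored; back in the 'thread'\n");
        return (0);
}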
*/ DBG_MSG("Entering scheduler due to signal\n"); _thread_kern_scheduler(); } else { /* Save the state of the current thread: */ - if (_setjmp(_thread_run->curframe->ctx.jb) == 0) { + if (_setjmp(_thread_run->ctx.jb) == 0) { /* Flag the jump buffer was the last state saved: */ - _thread_run->curframe->ctxtype = CTX_JB_NOSIG; - _thread_run->curframe->longjmp_val = 1; + _thread_run->ctxtype = CTX_JB_NOSIG; + _thread_run->longjmp_val = 1; } else { DBG_MSG("Returned from ___longjmp, thread %p\n", _thread_run); /* * This point is reached when a longjmp() is called - * to restore the state of a thread. + * to restore the state of a thread. * * This is the normal way out of the scheduler. */ _thread_kern_in_sched = 0; if (_thread_run->sig_defer_count == 0) { if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) - /* + /* * Cancellations override signals. * * Stick a cancellation point at the * start of each async-cancellable * thread's resumption. * * We allow threads woken at cancel * points to do their own checks. */ pthread_testcancel(); } if (_sched_switch_hook != NULL) { /* Run the installed switch hook: */ thread_run_switch_hook(_last_user_thread, _thread_run); } return; } /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } } void _thread_kern_sched_sig(void) { _thread_run->check_pending = 1; _thread_kern_sched(NULL); } void _thread_kern_scheduler(void) { - struct pthread_signal_frame *psf; struct timespec ts; struct timeval tv; pthread_t pthread, pthread_h; unsigned int current_tick; int add_to_prioq; /* If the currently running thread is a user thread, save it: */ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0) _last_user_thread = _thread_run; /* Are there pending signals for this thread? */ if (_thread_run->check_pending != 0) { _thread_run->check_pending = 0; _thread_sig_check_pending(_thread_run); } /* * Enter a scheduling loop that finds the next thread that is * ready to run. This loop completes when there are no more threads * in the global list or when a thread has its state restored by * either a sigreturn (if the state was saved as a sigcontext) or a - * longjmp (if the state was saved by a setjmp). + * longjmp (if the state was saved by a setjmp). */ while (!(TAILQ_EMPTY(&_thread_list))) { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); current_tick = _sched_ticks; /* * Protect the scheduling queues from access by the signal * handler. */ _queue_signals = 1; add_to_prioq = 0; if (_thread_run != &_thread_kern_thread) { /* * This thread no longer needs to yield the CPU. */ _thread_run->yield_on_sig_undefer = 0; if (_thread_run->state != PS_RUNNING) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive =+ UINT_MAX + 1; } } /* * Place the currently running thread into the * appropriate queue(s). */ switch (_thread_run->state) { case PS_DEAD: case PS_STATE_MAX: /* to silence -Wall */ case PS_SUSPENDED: /* * Dead and suspended threads are not placed * in any queue: */ break; case PS_RUNNING: /* * Runnable threads can't be placed in the * priority queue until after waiting threads * are polled (to preserve round-robin * scheduling). 
*/ add_to_prioq = 1; break; /* * States which do not depend on file descriptor I/O - * operations or timeouts: + * operations or timeouts: */ case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: case PS_SIGSUSPEND: case PS_SIGTHREAD: case PS_SIGWAIT: case PS_WAIT_WAIT: /* No timeouts for these states: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States which can timeout: */ case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States that require periodic work: */ case PS_SPINBLOCK: /* No timeouts for this state: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Increment spinblock count: */ _spinblock_count++; /* FALLTHROUGH */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); /* Insert into the work queue: */ PTHREAD_WORKQ_INSERT(_thread_run); break; } } /* + * Avoid polling file descriptors if there are none + * waiting: + */ + if (TAILQ_EMPTY(&_workq) != 0) { + } + /* * Poll file descriptors only if a new scheduling signal * has occurred or if we have no more runnable threads. */ - if (((current_tick = _sched_ticks) != last_tick) || + else if (((current_tick = _sched_ticks) != last_tick) || ((_thread_run->state != PS_RUNNING) && (PTHREAD_PRIOQ_FIRST() == NULL))) { /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Poll file descriptors to update the state of threads - * waiting on file I/O where data may be available: + * waiting on file I/O where data may be available: */ thread_kern_poll(0); /* Protect the scheduling queues: */ _queue_signals = 1; } last_tick = current_tick; /* * Wake up threads that have timed out. This has to be * done after polling in case a thread does a poll or * select with zero time. */ PTHREAD_WAITQ_SETACTIVE(); while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) && (pthread->wakeup_time.tv_sec != -1) && (((pthread->wakeup_time.tv_sec == 0) && (pthread->wakeup_time.tv_nsec == 0)) || (pthread->wakeup_time.tv_sec < ts.tv_sec) || ((pthread->wakeup_time.tv_sec == ts.tv_sec) && (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) { switch (pthread->state) { case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Return zero file descriptors ready: */ pthread->data.poll_data->nfds = 0; /* fall through */ default: /* * Remove this thread from the waiting queue * (and work queue if necessary) and place it * in the ready queue. 
*/ PTHREAD_WAITQ_CLEARACTIVE(); if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); break; } /* * Flag the timeout in the thread structure: */ pthread->timeout = 1; } PTHREAD_WAITQ_CLEARACTIVE(); /* * Check to see if the current thread needs to be added * to the priority queue: */ if (add_to_prioq != 0) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ current_tick = _sched_ticks; _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive += UINT_MAX + 1; } if ((_thread_run->slice_usec != -1) && (_thread_run->attr.sched_policy != SCHED_FIFO)) { /* * Accumulate the number of microseconds for * which the current thread has run: */ _thread_run->slice_usec += (_thread_run->last_inactive - _thread_run->last_active) * (long)_clock_res_usec; /* Check for time quantum exceeded: */ if (_thread_run->slice_usec > TIMESLICE_USEC) _thread_run->slice_usec = -1; } if (_thread_run->slice_usec == -1) { /* * The thread exceeded its time * quantum or it yielded the CPU; * place it at the tail of the * queue for its priority. */ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run); } else { /* * The thread hasn't exceeded its * interval. Place it at the head * of the queue for its priority. */ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run); } } /* * Get the highest priority thread in the ready queue. */ pthread_h = PTHREAD_PRIOQ_FIRST(); /* Check if there are no threads ready to run: */ if (pthread_h == NULL) { /* * Lock the pthread kernel by changing the pointer to * the running thread to point to the global kernel - * thread structure: + * thread structure: */ _thread_run = &_thread_kern_thread; DBG_MSG("No runnable threads, using kernel thread %p\n", _thread_run); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * There are no threads ready to run, so wait until - * something happens that changes this condition: + * something happens that changes this condition: */ thread_kern_poll(1); /* * This process' usage will likely be very small * while waiting in a poll. Since the scheduling * clock is based on the profiling timer, it is * unlikely that the profiling timer will fire * and update the time of day. To account for this, * get the time of day after polling with a timeout. */ gettimeofday((struct timeval *) &_sched_tod, NULL); /* Check once more for a runnable thread: */ _queue_signals = 1; pthread_h = PTHREAD_PRIOQ_FIRST(); _queue_signals = 0; } if (pthread_h != NULL) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread_h); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Check for signals queued while the scheduling * queues were protected: */ while (_sigq_check_reqd != 0) { /* Clear before handling queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues again: */ _queue_signals = 1; dequeue_signals(); /* * Check for a higher priority thread that * became runnable due to signal handling. 
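The wakeup scan above compares each thread's wakeup_time against the current timespec, treating tv_sec == -1 as "wait forever" and an all-zero time as "wake immediately". A standalone sketch of that comparison (has_timed_out is an invented name, not a library function):

#include <stdio.h>
#include <time.h>

static int
has_timed_out(const struct timespec *wakeup, const struct timespec *now)
{
        if (wakeup->tv_sec == -1)               /* "wait forever" sentinel */
                return (0);
        if (wakeup->tv_sec == 0 && wakeup->tv_nsec == 0)
                return (1);                     /* "immediately" sentinel */
        return (wakeup->tv_sec < now->tv_sec ||
            (wakeup->tv_sec == now->tv_sec &&
            wakeup->tv_nsec <= now->tv_nsec));
}

int
main(void)
{
        struct timespec now = { 100, 500 };
        struct timespec w1 = { 99, 0 }, w2 = { -1, -1 };

        printf("%d %d\n", has_timed_out(&w1, &now),
            has_timed_out(&w2, &now));          /* prints: 1 0 */
        return (0);
}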
*/ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) && (pthread->active_priority > pthread_h->active_priority)) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * Insert the lower priority thread * at the head of its priority list: */ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h); /* There's a new thread in town: */ pthread_h = pthread; } /* Unprotect the scheduling queues: */ _queue_signals = 0; } /* Make the selected thread the current thread: */ _thread_run = pthread_h; /* * Save the current time as the time that the thread - * became active: + * became active: */ current_tick = _sched_ticks; _thread_run->last_active = (long) current_tick; /* * Check if this thread is running for the first time * or running again after using its full time slice - * allocation: + * allocation: */ if (_thread_run->slice_usec == -1) { /* Reset the accumulated time slice period: */ _thread_run->slice_usec = 0; } /* * If we had a context switch, run any * installed switch hooks. */ if ((_sched_switch_hook != NULL) && (_last_user_thread != _thread_run)) { thread_run_switch_hook(_last_user_thread, _thread_run); } /* * Continue the thread at its current frame: */ - psf = _thread_run->curframe; - switch(psf->ctxtype) { + switch(_thread_run->ctxtype) { case CTX_JB_NOSIG: - ___longjmp(psf->ctx.jb, psf->longjmp_val); + ___longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_JB: - __longjmp(psf->ctx.jb, psf->longjmp_val); + __longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_SJB: - __siglongjmp(psf->ctx.sigjb, psf->longjmp_val); + __siglongjmp(_thread_run->ctx.sigjb, + _thread_run->longjmp_val); break; case CTX_UC: /* XXX - Restore FP registers? */ - FP_RESTORE_UC(&psf->ctx.uc); + FP_RESTORE_UC(&_thread_run->ctx.uc); /* * Do a sigreturn to restart the thread that - * was interrupted by a signal: + * was interrupted by a signal: */ _thread_kern_in_sched = 0; #if NOT_YET - _setcontext(&psf->ctx.uc); + _setcontext(&_thread_run->ctx.uc); #else /* * Ensure the process signal mask is set * correctly: */ - psf->ctx.uc.uc_sigmask = _process_sigmask; - _thread_sys_sigreturn(&psf->ctx.uc); + _thread_run->ctx.uc.uc_sigmask = + _process_sigmask; + _thread_sys_sigreturn(&_thread_run->ctx.uc); #endif break; } /* This point should not be reached. */ PANIC("Thread has returned from sigreturn or longjmp"); } } /* There are no more threads, so exit this process: */ exit(0); } void _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and is placed into the proper queue. */ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and it is placed into the proper * queue(s). 
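Looking ahead to thread_kern_poll() below: its poll timeout is derived from the earliest wakeup time, INFTIM when nothing can time out, otherwise the remaining interval in milliseconds clamped at zero. A minimal sketch with invented times; poll_timeout_ms() is not a library function:

#include <stdio.h>
#include <time.h>

#ifndef INFTIM
#define INFTIM (-1)             /* poll(2) "wait forever" */
#endif

static int
poll_timeout_ms(const struct timespec *wakeup, const struct timespec *now)
{
        int ms;

        if (wakeup == NULL || wakeup->tv_sec == -1)
                return (INFTIM);        /* no thread can time out */
        ms = (wakeup->tv_sec - now->tv_sec) * 1000 +
            (wakeup->tv_nsec - now->tv_nsec) / 1000000;
        return (ms < 0 ? 0 : ms);       /* don't allow negative timeouts */
}

int
main(void)
{
        struct timespec now = { 100, 0 }, wake = { 101, 500000000 };

        printf("%d ms\n", poll_timeout_ms(&wake, &now));        /* 1500 ms */
        return (0);
}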
*/ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; _SPINUNLOCK(lock); /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } static void thread_kern_poll(int wait_reqd) { int count = 0; int i, found; int kern_pipe_added = 0; int nfds = 0; int timeout_ms = 0; struct pthread *pthread; struct timespec ts; struct timeval tv; /* Check if the caller wants to wait: */ if (wait_reqd == 0) { timeout_ms = 0; } else { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); _queue_signals = 1; pthread = TAILQ_FIRST(&_waitingq); _queue_signals = 0; if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) { /* * Either there are no threads in the waiting queue, * or there are no threads that can timeout. */ timeout_ms = INFTIM; } else { /* * Calculate the time left for the next thread to * timeout: */ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) * 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) / 1000000); /* * Don't allow negative timeouts: */ if (timeout_ms < 0) timeout_ms = 0; } } /* Protect the scheduling queues: */ _queue_signals = 1; /* * Check to see if the signal queue needs to be walked to look * for threads awoken by a signal while in the scheduler. */ if (_sigq_check_reqd != 0) { /* Reset flag before handling queued signals: */ _sigq_check_reqd = 0; dequeue_signals(); } /* * Check for a thread that became runnable due to a signal: */ if (PTHREAD_PRIOQ_FIRST() != NULL) { /* * Since there is at least one runnable thread, * disable the wait. */ timeout_ms = 0; } /* * Form the poll table: */ nfds = 0; if (timeout_ms != 0) { /* Add the kernel pipe to the poll table: */ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0]; _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].revents = 0; nfds++; kern_pipe_added = 1; } PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. */ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* One less thread in a spinblock state: */ _spinblock_count--; /* * Since there is at least one runnable * thread, disable the wait. */ timeout_ms = 0; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor write wait: */ case PS_FDW_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLWRNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Limit number of polled files to table size: */ if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { for (i = 0; i < pthread->data.poll_data->nfds; i++) { _thread_pfd_table[nfds + i].fd = pthread->data.poll_data->fds[i].fd; _thread_pfd_table[nfds + i].events = pthread->data.poll_data->fds[i].events; } nfds += pthread->data.poll_data->nfds; } break; /* Other states do not depend on file I/O. 
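The two workq scans in thread_kern_poll() build and then consume a pollfd table: one slot per blocked descriptor, POLLRDNORM for PS_FDR_WAIT and POLLWRNORM for PS_FDW_WAIT, with revents checked after the poll returns. A self-contained sketch of that pattern using standard descriptors; it is an illustration of the table layout, not the library's scan:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct pollfd pfd[2];
        int n;

        pfd[0].fd = STDIN_FILENO;
        pfd[0].events = POLLRDNORM;     /* a PS_FDR_WAIT-style slot */
        pfd[0].revents = 0;
        pfd[1].fd = STDOUT_FILENO;
        pfd[1].events = POLLWRNORM;     /* a PS_FDW_WAIT-style slot */
        pfd[1].revents = 0;

        n = poll(pfd, 2, 0);            /* non-blocking scan */
        printf("%d descriptor(s) ready\n", n);
        return (0);
}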
*/ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); /* * Wait for a file descriptor to be ready for read, write, or - * an exception, or a timeout to occur: + * an exception, or a timeout to occur: */ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms); if (kern_pipe_added != 0) /* * Remove the pthread kernel pipe file descriptor - * from the pollfd table: + * from the pollfd table: */ nfds = 1; else nfds = 0; /* * Check if it is possible that there are bytes in the kernel * read pipe waiting to be read: */ if (count < 0 || ((kern_pipe_added != 0) && (_thread_pfd_table[0].revents & POLLRDNORM))) { /* * If the kernel read pipe was included in the - * count: + * count: */ if (count > 0) { /* Decrement the count of file descriptors: */ count--; } if (_sigq_check_reqd != 0) { /* Reset flag before handling signals: */ _sigq_check_reqd = 0; dequeue_signals(); } } /* * Check if any file descriptors are ready: */ if (count > 0) { /* * Enter a loop to look for threads waiting on file * descriptors that are flagged as available by the - * _poll syscall: + * _poll syscall: */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. */ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLRDNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor write wait: */ case PS_FDW_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLWRNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { /* * Enter a loop looking for I/O * readiness: */ found = 0; for (i = 0; i < pthread->data.poll_data->nfds; i++) { if (_thread_pfd_table[nfds + i].revents != 0) { pthread->data.poll_data->fds[i].revents = _thread_pfd_table[nfds + i].revents; found++; } } /* Increment before destroying: */ nfds += pthread->data.poll_data->nfds; if (found != 0) { pthread->data.poll_data->nfds = found; PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } } else nfds += pthread->data.poll_data->nfds; break; /* Other states do not depend on file I/O. */ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); } else if (_spinblock_count != 0) { /* * Enter a loop to look for threads waiting on a spinlock * that is now available. */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { if (pthread->state == PS_SPINBLOCK) { /* * If the lock is available, let the thread run. 
*/ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } } } PTHREAD_WAITQ_CLEARACTIVE(); } /* Unprotect the scheduling queues: */ _queue_signals = 0; while (_sigq_check_reqd != 0) { /* Handle queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues: */ _queue_signals = 1; dequeue_signals(); /* Unprotect the scheduling queues: */ _queue_signals = 0; } } void _thread_kern_set_timeout(const struct timespec * timeout) { struct timespec current_time; struct timeval tv; /* Reset the timeout flag for the running thread: */ _thread_run->timeout = 0; /* Check if the thread is to wait forever: */ if (timeout == NULL) { /* * Set the wakeup time to something that can be recognised as - * different to an actual time of day: + * different to an actual time of day: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; } /* Check if no waiting is required: */ else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) { /* Set the wake up time to 'immediately': */ _thread_run->wakeup_time.tv_sec = 0; _thread_run->wakeup_time.tv_nsec = 0; } else { /* Get the current time: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, ¤t_time); /* Calculate the time for the current thread to wake up: */ _thread_run->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec; _thread_run->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec; /* Check if the nanosecond field needs to wrap: */ if (_thread_run->wakeup_time.tv_nsec >= 1000000000) { /* Wrap the nanosecond field: */ _thread_run->wakeup_time.tv_sec += 1; _thread_run->wakeup_time.tv_nsec -= 1000000000; } } } void _thread_kern_sig_defer(void) { /* Allow signal deferral to be recursive. */ _thread_run->sig_defer_count++; } void _thread_kern_sig_undefer(void) { /* * Perform checks to yield only if we are about to undefer * signals. */ if (_thread_run->sig_defer_count > 1) { /* Decrement the signal deferral count. */ _thread_run->sig_defer_count--; } else if (_thread_run->sig_defer_count == 1) { /* Reenable signals: */ _thread_run->sig_defer_count = 0; /* * Check if there are queued signals: */ if (_sigq_check_reqd != 0) _thread_kern_sched(NULL); - /* + /* * Check for asynchronous cancellation before delivering any * pending signals: */ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); /* * If there are pending signals or this thread has * to yield the CPU, call the kernel scheduler: * * XXX - Come back and revisit the pending signal problem */ if ((_thread_run->yield_on_sig_undefer != 0) || SIGNOTEMPTY(_thread_run->sigpend)) { _thread_run->yield_on_sig_undefer = 0; _thread_kern_sched(NULL); } } } static void dequeue_signals(void) { char bufr[128]; int num; /* - * Enter a loop to clear the pthread kernel pipe: + * Enter a loop to clear the pthread kernel pipe: */ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) { } if ((num < 0) && (errno != EAGAIN)) { /* * The only error we should expect is if there is * no data to read. 
*/ PANIC("Unable to read from thread kernel pipe"); } /* Handle any pending signals: */ _thread_sig_handle_pending(); } static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in) { pthread_t tid_out = thread_out; pthread_t tid_in = thread_in; if ((tid_out != NULL) && (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_out = NULL; if ((tid_in != NULL) && (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_in = NULL; if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) { /* Run the scheduler switch hook: */ _sched_switch_hook(tid_out, tid_in); } } #endif Index: head/lib/libc_r/uthread/uthread_mutex.c =================================================================== --- head/lib/libc_r/uthread/uthread_mutex.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_mutex.c (revision 68516) @@ -1,1467 +1,1480 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Reset the mutex flags: */ pmutex->m_flags = 0; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. 
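From the application side, the type and protocol validated in pthread_mutex_init() arrive through a mutex attribute object. A minimal sketch using only standard pthread calls; error checking is elided for brevity:

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t mutex;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&mutex, &attr);

        pthread_mutex_lock(&mutex);
        pthread_mutex_lock(&mutex);     /* recursive: m_count goes to 1 */
        pthread_mutex_unlock(&mutex);
        pthread_mutex_unlock(&mutex);

        pthread_mutex_destroy(&mutex);
        pthread_mutexattr_destroy(&attr);
        printf("recursive lock/unlock ok\n");
        return (0);
}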
*/ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } int pthread_mutex_trylock(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. 
*/ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } int pthread_mutex_lock(pthread_mutex_t * mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) - ret = EINVAL; + return (EINVAL); /* * If the mutex is statically initialized, perform the dynamic * initialization: */ - else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + if ((*mutex == NULL) && + ((ret = init_static(mutex)) != 0)) + return (ret); + + /* Reset the interrupted flag: */ + _thread_run->interrupted = 0; + + /* + * Enter a loop waiting to become the mutex owner. We need a + * loop in case the waiting thread is interrupted by a signal + * to execute a signal handler. It is not (currently) possible + * to remain in the waiting queue while running a handler. + * Instead, the thread is interrupted and backed out of the + * waiting queue prior to executing the signal handler. + */ + while (((*mutex)->m_owner != _thread_run) && (ret == 0) && + (_thread_run->interrupted == 0)) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } - /* Reset the interrupted flag: */ - _thread_run->interrupted = 0; - /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; if (_thread_run->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* Clear any previous error: */ _thread_run->error = 0; /* * Unlock 
the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = _thread_run->error; _thread_run->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (_thread_run->interrupted != 0) mutex_queue_remove(*mutex, _thread_run); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); - - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); } + if (_thread_run->interrupted != 0 && + _thread_run->continuation != NULL) + _thread_run->continuation((void *) _thread_run); + /* Return the completion status: */ return (ret); } int pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? 
EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. */ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. 
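On the unlock side of a priority mutex, the sequence above restores the owner's saved inherited priority and recomputes the active priority against the base priority, so that base-priority changes made while the mutex was held are preserved. A toy sketch with invented structs mirroring the field names in the diff:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct toy_thread { int base_priority, inherited_priority, active_priority; };
struct toy_mutex  { int m_saved_prio; };

int
main(void)
{
        struct toy_thread t = { 5, 20, 20 };    /* boosted to 20 by the mutex */
        struct toy_mutex m = { 0 };             /* priority saved at lock time */

        /* Drop the boost and recompute against the base priority: */
        t.inherited_priority = m.m_saved_prio;
        t.active_priority = MAX(t.inherited_priority, t.base_priority);
        printf("active priority after unlock: %d\n", t.active_priority);  /* 5 */
        return (0);
}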
*/ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority * been lowered and/or this threads * priority has been raised subsequent * to this thread being queued on the * waiting list. */ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritence mutex. 
A change in a threads base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorites of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this threads change * in priority. This has the side effect of changing * the threads active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritence mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when a threads priority changes that is already in the mutex * waiting queue. */ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning threads * active priority(*). * * (*) Because the owning threads current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the threads saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the threads priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the threads priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the threads new priority has affected the * priority of the mutex. 
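
[Note: the core computation in mutex_priority_adjust above is a three-way maximum. Because the waiting queue is kept in descending priority order, the highest-priority waiter is simply the head of the queue; the owner's contribution must be recomputed from its saved inherited priority and base priority rather than taken from its possibly-stale active priority. A sketch with illustrative parameter names in place of the TAILQ walk:]

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Recompute a PI mutex's priority the way mutex_priority_adjust does:
 * the maximum of the head waiter's active priority and the owner's
 * priority as recalculated from the saved inherited priority and the
 * owner's base priority.
 */
static int
mutex_prio(int head_waiter_prio, int saved_inherited_prio, int owner_base_prio)
{
	return (MAX(head_waiter_prio,
	    MAX(saved_inherited_prio, owner_base_prio)));
}

int
main(void)
{
	int old_prio = 12;
	int new_prio = mutex_prio(14, 12, 10);

	if (new_prio == old_prio)
		printf("no propagation needed\n");
	else
		printf("propagate new priority %d\n", new_prio);
	return (0);
}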
*/ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. */ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning threads active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritence * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritence * mutexes. This might be optimized by only placing * priority inheritence mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owners saved (inherited) priority to * reflect the priority of the previous mutex. */ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the threads inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritence mutexes. 
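
[Note: the rescan loop in mutex_rescan_owned above threads inherited priority through the list of owned mutexes: each PI mutex's saved priority records what was inherited before it, and its recomputed priority feeds the mutexes locked after it. A standalone model of that walk, with the owned-mutex TAILQ flattened into an array and hypothetical field names:]

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct owned_mutex {
	int is_pi;		/* PTHREAD_PRIO_INHERIT protocol? */
	int top_waiter_prio;	/* -1 if the mutex has no waiters */
	int m_prio;
	int m_saved_prio;
};

/* Returns the thread's resulting inherited priority. */
static int
rescan_owned(struct owned_mutex *m, int n, int base_prio)
{
	int inherited = 0;
	int active = base_prio;
	int i;

	for (i = 0; i < n; i++) {
		if (!m[i].is_pi)
			continue;	/* only PI mutexes matter here */
		m[i].m_saved_prio = inherited;
		m[i].m_prio = (m[i].top_waiter_prio >= 0) ?
		    MAX(active, m[i].top_waiter_prio) : active;
		inherited = m[i].m_prio;
		active = MAX(m[i].m_prio, base_prio);
	}
	return (inherited);
}

int
main(void)
{
	struct owned_mutex m[] = { { 1, 14, 0, 0 }, { 1, -1, 0, 0 } };

	printf("inherited priority: %d\n", rescan_owned(m, 2, 10)); /* 14 */
	return (0);
}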
*/ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. */ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - if (pthread->state == PS_MUTEX_WAIT) { + if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ - mutex->m_owner->data.mutex = NULL; + pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. */ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } #endif Index: head/lib/libc_r/uthread/uthread_sig.c =================================================================== --- head/lib/libc_r/uthread/uthread_sig.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_sig.c (revision 68516) @@ -1,1267 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); -static void thread_sigframe_add(pthread_t thread, int sig); -static void thread_sigframe_leave(pthread_t thread, int frame); -static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); +static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { - pthread_t pthread; - int current_frame; + pthread_t pthread, pthread_h; + void *stackp; + int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run); + if (_thread_kern_in_sched != 0) + in_sched = 1; + else { + stackp = (void *)GET_STACK_UC(ucp); + if ((stackp >= _thread_kern_sched_stack) && + (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) + in_sched = 1; + } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; - if (_thread_kern_in_sched != 0) { + if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. 
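
[Note: the new in_sched test added above no longer trusts _thread_kern_in_sched alone; it also checks whether the interrupted stack pointer falls inside the scheduler's dedicated stack. A sketch of that range test, with a local array standing in for _thread_kern_sched_stack and SCHED_STACK_SIZE:]

#include <stdint.h>
#include <stdio.h>

static int
on_sched_stack(uintptr_t sp, uintptr_t stack_base, size_t stack_size)
{
	/* Inclusive bounds, matching the handler's comparison. */
	return (sp >= stack_base && sp <= stack_base + stack_size);
}

int
main(void)
{
	static char sched_stack[4096];	/* stand-in scheduler stack */
	uintptr_t base = (uintptr_t)sched_stack;

	printf("%d\n", on_sched_stack(base + 100, base, sizeof(sched_stack)));
	printf("%d\n", on_sched_stack(base + sizeof(sched_stack) + 64,
	    base, sizeof(sched_stack)));
	return (0);
}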
*/ else if (_thread_run->sig_defer_count > 0) _thread_run->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(_thread_run, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp - * instead. + * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the - * process: + * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ - else if ((_thread_kern_in_sched != 0) || - (_thread_run->sig_defer_count > 0)) { + else if ((in_sched != 0) || (_thread_run->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { _thread_sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { - /* The signal is not blocked; handle the signal: */ - current_frame = _thread_run->sigframe_count; - /* + * The signal is not blocked; handle the signal. + * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); + pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. * Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. 
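
[Note: writing the signal number into _thread_kern_pipe[1] above is the classic self-pipe pattern: the handler does only an async-signal-safe write, and the signal is processed later when the kernel thread reads the byte back out. A minimal, self-contained demonstration of the pattern (the pipe and handler here are local stand-ins, not the library's):]

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int pipe_wr;	/* write end, in the spirit of _thread_kern_pipe[1] */

/* Async-signal-safe: just push the signal number into the pipe. */
static void
handler(int sig)
{
	char c = (char)sig;

	write(pipe_wr, &c, 1);
}

int
main(void)
{
	int fds[2];
	char c;

	pipe(fds);
	pipe_wr = fds[1];
	signal(SIGUSR1, handler);
	raise(SIGUSR1);

	read(fds[0], &c, 1);	/* drained later, outside the handler */
	printf("queued signal %d\n", (int)c);
	return (0);
}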
*/ thread_sig_savecontext(_thread_run, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ - pthread = PTHREAD_PRIOQ_FIRST(); + pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", - pthread); + pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ - if ((_thread_run->sigframe_count > current_frame) || - ((pthread != NULL) && - (pthread->active_priority > _thread_run->active_priority))) { + if ((pthread == _thread_run) || ((pthread_h != NULL) && + (pthread_h->active_priority > _thread_run->active_priority))) { /* Enter the kernel scheduler: */ - DBG_MSG("Entering scheduler from signal handler\n"); _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { - struct pthread_signal_frame *psf; + memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); - psf = _thread_run->curframe; - - memcpy(&psf->ctx.uc, ucp, sizeof(*ucp)); - /* XXX - Save FP registers too? */ - FP_SAVE_UC(&psf->ctx.uc); + FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ - psf->ctxtype = CTX_UC; + pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) + if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); + /* Unblock this signal to allow further dumps: */ + _thread_sigq[sig - 1].blocked = 0; + } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((_thread_run != &_thread_kern_thread) && !sigismember(&_thread_run->sigmask, sig)) signaled_thread = _thread_run; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. 
- */ + */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads - * capable of receiving the signal: + * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. 
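
[Note: _thread_sig_check_pending above computes the deliverable set as (thread pending | process pending) & ~thread mask using the library's SIGSETOR/SIGSETNAND macros. The same arithmetic expressed with only portable sigset_t operations:]

#include <signal.h>
#include <stdio.h>

static int
deliverable(const sigset_t *thr_pend, const sigset_t *proc_pend,
    const sigset_t *mask, int sig)
{
	return ((sigismember(thr_pend, sig) || sigismember(proc_pend, sig)) &&
	    !sigismember(mask, sig));
}

int
main(void)
{
	sigset_t tp, pp, mask;

	sigemptyset(&tp);
	sigemptyset(&pp);
	sigemptyset(&mask);
	sigaddset(&tp, SIGUSR1);	/* pending on the thread */
	sigaddset(&pp, SIGUSR2);	/* pending on the process */
	sigaddset(&mask, SIGUSR2);	/* but blocked by the thread */

	printf("SIGUSR1: %d\n", deliverable(&tp, &pp, &mask, SIGUSR1)); /* 1 */
	printf("SIGUSR2: %d\n", deliverable(&tp, &pp, &mask, SIGUSR2)); /* 0 */
	return (0);
}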
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ _thread_sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { - int restart, frame; - int block_signals = 0; + int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
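
[Note: the SIGCHLD case above re-asserts O_NONBLOCK on every descriptor the library manages, since a child may have turned blocking mode back on for shared descriptors. The standard fcntl(2) idiom for forcing a descriptor back to non-blocking mode (the library uses its cached flags; this sketch re-reads them with F_GETFL):]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFL, flags | O_NONBLOCK));
}

int
main(void)
{
	if (set_nonblock(STDIN_FILENO) == 0)
		printf("stdin is now non-blocking\n");
	return (0);
}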
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ - case PS_COND_WAIT: case PS_JOIN: + /* Only set the interrupted flag for PS_JOIN: */ + pthread->interrupted = 1; + /* FALLTHROUGH */ + case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); - /* - * To ensure the thread is removed from the fd and file - * queues before any other signal interrupts it, set the - * signal mask to block all signals. As soon as the thread - * is removed from the queue the signal mask will be - * restored. - */ - block_signals = 1; break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { + /* Setup a signal frame and save the current threads state: */ + thread_sigframe_add(pthread, sig, has_args); + /* - * Save the current state of the thread and add a - * new signal frame. + * Signals are deferred until just before the threads + * signal handler is invoked: */ - frame = pthread->sigframe_count; - thread_sigframe_save(pthread, pthread->curframe); - thread_sigframe_add(pthread, sig); - pthread->sigframes[frame + 1]->sig_has_args = has_args; - SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask); - if (block_signals != 0) { - /* Save the signal mask and block all signals: */ - pthread->sigframes[frame + 1]->saved_state.psd_sigmask = - pthread->sigmask; - sigfillset(&pthread->sigmask); - } - + pthread->sig_defer_count = 1; + /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue. 
*/ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != _thread_run) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == _thread_run) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. 
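
[Note: _thread_sig_send above is the engine behind pthread_kill(). From the application's side the observable contract is simply that the signal is delivered to the named thread when that thread has it unmasked. A small self-contained demonstration using the public API:]

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
on_usr1(int sig)
{
	/* write(2) is async-signal-safe; printf(3) is not. */
	write(STDOUT_FILENO, "got SIGUSR1\n", 12);
}

static void *
worker(void *arg)
{
	(void)arg;
	sleep(5);	/* interrupted early by the signal */
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	signal(SIGUSR1, on_usr1);
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);			/* let the worker block */
	pthread_kill(t, SIGUSR1);	/* delivered to that thread only */
	pthread_join(t, NULL);
	return (0);
}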
* * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; - pthread_t thread; - int dead = 0; - int i, sig, has_args; - int frame, dst_frame; + pthread_t thread; thread = _thread_run; /* Get the current frame and state: */ - frame = thread->sigframe_count; - PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler"); psf = thread->curframe; + thread->curframe = NULL; + PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); - /* Check the threads previous state: */ + /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. */ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; case PS_FILE_WAIT: _flockfile_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; + case PS_COND_WAIT: + _cond_wait_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_JOIN: + _join_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_MUTEX_WAIT: + _mutex_lock_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + default: break; } } + /* Unblock the signal in case we don't return from the handler: */ + _thread_sigq[psf->signo - 1].blocked = 0; + /* - * Unless the thread exits or longjmps out of the signal handler, - * return to the previous frame: + * Lower the priority before calling the handler in case + * it never returns (longjmps back): */ - dst_frame = frame - 1; + thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* + * Reenable interruptions without checking for the need to + * context switch: + */ + thread->sig_defer_count = 0; + + /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { + DBG_MSG("_thread_sig_wrapper: Calling signal handler for " + "thread 0x%p\n", thread); /* - * The signal jump buffer is allocated off the stack. - * If the signal handler tries to [_][sig]longjmp() or - * setcontext(), our wrapped versions of these routines - * will copy the user supplied jump buffer or context - * to the destination signal frame, set the destination - * signal frame in psf->dst_frame, and _longjmp() back - * to here. + * Dispatch the signal via the custom signal + * handler: */ - jmp_buf jb; - - /* - * Set up the context for abnormal returns out of signal - * handlers. - */ - psf->sig_jb = &jb; - if (_setjmp(jb) == 0) { - DBG_MSG("_thread_sig_wrapper: Entering frame %d, " - "stack 0x%lx\n", frame, GET_STACK_JB(jb)); - /* - * Invalidate the destination frame before calling - * the signal handler. 
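
[Note: thread_sig_add ORs PTHREAD_SIGNAL_PRIORITY into the target's active priority so it runs ahead of normal threads, and the wrapper above clears the bit again just before invoking the user handler. With the flag chosen above the normal priority range, the boost and unboost are single bit operations; the bit value below is illustrative, not the library's constant:]

#include <stdio.h>

#define SIGNAL_PRIORITY	0x80	/* illustrative: above any normal priority */

int
main(void)
{
	int active = 15;

	active |= SIGNAL_PRIORITY;	/* boosted while the frame is set up */
	printf("boosted:  %#x\n", active);

	active &= ~SIGNAL_PRIORITY;	/* dropped before the handler runs */
	printf("restored: %#x\n", active);
	return (0);
}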
- */ - psf->dst_frame = -1; - - /* - * Dispatch the signal via the custom signal - * handler: - */ - if (psf->sig_has_args == 0) - (*(sigfunc))(psf->signo, NULL, NULL); - else if ((_thread_sigact[psf->signo - 1].sa_flags & - SA_SIGINFO) != 0) - (*(sigfunc))(psf->signo, - &_thread_sigq[psf->signo - 1].siginfo, - &_thread_sigq[psf->signo - 1].uc); - else - (*(sigfunc))(psf->signo, - (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code, - &_thread_sigq[psf->signo - 1].uc); - } - else { - /* - * The return from _setjmp() should only be non-zero - * when the signal handler wants to xxxlongjmp() or - * setcontext() to a different context, or if the - * thread has exited (via pthread_exit). - */ - /* - * Grab a copy of the destination frame before it - * gets clobbered after unwinding. - */ - dst_frame = psf->dst_frame; - DBG_MSG("Abnormal exit from handler for signal %d, " - "frame %d\n", psf->signo, frame); - - /* Has the thread exited? */ - if ((dead = thread->flags & PTHREAD_EXITING) != 0) - /* When exiting, unwind to frame 0. */ - dst_frame = 0; - else if ((dst_frame < 0) || (dst_frame > frame)) - PANIC("Attempt to unwind to invalid " - "signal frame"); - - /* Unwind to the target frame: */ - for (i = frame; i > dst_frame; i--) { - DBG_MSG("Leaving frame %d, signal %d\n", i, - thread->sigframes[i]->signo); - /* Leave the current signal frame: */ - thread_sigframe_leave(thread, i); - - /* - * Save whatever is needed out of the state - * data; as soon as the frame count is - * is decremented, another signal can arrive - * and corrupt this view of the state data. - */ - sig = thread->sigframes[i]->signo; - has_args = thread->sigframes[i]->sig_has_args; - - /* - * We're done with this signal frame: - */ - thread->curframe = thread->sigframes[i - 1]; - thread->sigframe_count = i - 1; - - /* - * Only unblock the signal if it was a - * process signal as opposed to a signal - * generated by pthread_kill(). - */ - if (has_args != 0) - _thread_sigq[sig - 1].blocked = 0; - } - } + if (psf->sig_has_args == 0) + (*(sigfunc))(psf->signo, NULL, NULL); + else if ((_thread_sigact[psf->signo - 1].sa_flags & + SA_SIGINFO) != 0) + (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); + else + (*(sigfunc))(psf->signo, + (siginfo_t *)psf->siginfo.si_code, &psf->uc); } - /* - * Call the kernel scheduler to schedule the next - * thread. + * Call the kernel scheduler to safely restore the frame and + * schedule the next thread: */ - if (dead == 0) { - /* Restore the threads state: */ - thread_sigframe_restore(thread, thread->sigframes[dst_frame]); - _thread_kern_sched_frame(dst_frame); - } - else { - PTHREAD_ASSERT(dst_frame == 0, - "Invalid signal frame for dead thread"); - - /* Perform any necessary cleanup before exiting. 
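
[Note: the simplified wrapper above dispatches to the user handler in one of three ways, depending on whether handler arguments were captured and whether the application asked for SA_SIGINFO (the third branch is the old BSD convention of passing the code where the siginfo pointer goes). A compressed, runnable model of that switch; the parameters stand in for _thread_sigact and the signal frame:]

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void
dispatch(void (*sigfunc)(int, siginfo_t *, void *), int flags,
    int has_args, int signo, siginfo_t *info, void *uc)
{
	if (has_args == 0)
		sigfunc(signo, NULL, NULL);		/* pthread_kill() path */
	else if (flags & SA_SIGINFO)
		sigfunc(signo, info, uc);		/* POSIX arguments */
	else
		/* Old BSD convention: the code rides in the info slot. */
		sigfunc(signo, (siginfo_t *)(long)info->si_code, uc);
}

static void
h(int signo, siginfo_t *info, void *uc)
{
	(void)uc;
	printf("signal %d, info %p\n", signo, (void *)info);
}

int
main(void)
{
	siginfo_t si;

	memset(&si, 0, sizeof(si));
	dispatch(h, SA_SIGINFO, 1, SIGUSR1, &si, NULL);
	dispatch(h, 0, 0, SIGUSR1, &si, NULL);
	return (0);
}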
*/ - thread_sigframe_leave(thread, 0); - - /* This should never return: */ - _thread_exit_finish(); - PANIC("Return from _thread_exit_finish in signal wrapper"); - } + _thread_kern_sched_frame(psf); } static void -thread_sigframe_add(pthread_t thread, int sig) +thread_sigframe_add(pthread_t thread, int sig, int has_args) { + struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ - switch (thread->curframe->ctxtype) { + switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: - stackp = GET_STACK_JB(thread->curframe->ctx.jb); + stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: - stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb); + stackp = GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: - stackp = GET_STACK_UC(&thread->curframe->ctx.uc); + stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); - /* Set up the new frame: */ - thread->sigframe_count++; - thread->sigframes[thread->sigframe_count] = - (struct pthread_signal_frame *) stackp; - thread->curframe = thread->sigframes[thread->sigframe_count]; - thread->curframe->stackp = stackp; - thread->curframe->ctxtype = CTX_JB_NOSIG; - thread->curframe->longjmp_val = 1; - thread->curframe->signo = sig; + psf = (struct pthread_signal_frame *) stackp; - /* - * Set up the context: - */ - _setjmp(thread->curframe->ctx.jb); - SET_STACK_JB(thread->curframe->ctx.jb, stackp); - SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper); -} + /* Save the current context in the signal frame: */ + thread_sigframe_save(thread, psf); -/* - * Locate the signal frame from the specified stack pointer. - */ -int -_thread_sigframe_find(pthread_t pthread, void *stackp) -{ - int frame; + /* Set handler specific information: */ + psf->sig_has_args = has_args; + psf->signo = sig; + if (has_args) { + /* Copy the signal handler arguments to the signal frame: */ + memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, + sizeof(psf->uc)); + memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, + sizeof(psf->siginfo)); + } + /* Set up the new frame: */ + thread->curframe = psf; + thread->ctxtype = CTX_JB_NOSIG; + thread->longjmp_val = 1; + thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | + PTHREAD_FLAGS_IN_SYNCQ; /* - * Find the destination of the target frame based on the - * given stack pointer. 
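
[Note: thread_sigframe_add above carves the new frame out of the interrupted thread's own stack: leave a little headroom, round down to an aligned word, then reserve sizeof(struct pthread_signal_frame). The same pointer arithmetic isolated into a small program, with a hypothetical frame layout standing in for the real structure:]

#include <stdio.h>

/* Hypothetical stand-in for struct pthread_signal_frame. */
struct sigframe {
	int signo;
	char saved_ctx[256];
};

static unsigned long
carve_frame(unsigned long stackp)
{
	stackp -= sizeof(double);	/* leave a little space */
	stackp &= ~0x3UL;		/* round down to an aligned word */
	stackp -= sizeof(struct sigframe);
	return (stackp);
}

int
main(void)
{
	unsigned long sp = 0x7fffdeadbeefUL;
	unsigned long frame = carve_frame(sp);

	printf("sp %#lx -> frame at %#lx\n", sp, frame);
	return (0);
}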
+ * Set up the context: */ - for (frame = pthread->sigframe_count; frame >= 0; frame--) { - if (stackp < (void *)pthread->sigframes[frame]->stackp) - break; - } - return (frame); + stackp += sizeof(double); + _setjmp(thread->ctx.jb); + SET_STACK_JB(thread->ctx.jb, stackp); + SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } - + void -thread_sigframe_leave(pthread_t thread, int frame) +_thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { - struct pthread_state_data *psd; - - psd = &thread->sigframes[frame]->saved_state; - + thread->ctxtype = psf->ctxtype; + memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* - * Perform any necessary cleanup for this signal frame: + * Only restore the signal mask if it hasn't been changed + * by the application during invocation of the signal handler: */ - switch (psd->psd_state) { - case PS_DEAD: - case PS_DEADLOCK: - case PS_RUNNING: - case PS_SIGTHREAD: - case PS_STATE_MAX: - case PS_SUSPENDED: - break; - - /* - * Threads in the following states need to be removed - * from queues. - */ - case PS_COND_WAIT: - _cond_wait_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - _fd_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FILE_WAIT: - _flockfile_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_JOIN: - _join_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_MUTEX_WAIT: - _mutex_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - case PS_SLEEP_WAIT: - case PS_SPINBLOCK: - case PS_WAIT_WAIT: - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) { - PTHREAD_WAITQ_REMOVE(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - } - break; - } -} - -static void -thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) -{ - thread->interrupted = psf->saved_state.psd_interrupted; - thread->sigmask = psf->saved_state.psd_sigmask; - thread->state = psf->saved_state.psd_state; - thread->flags = psf->saved_state.psd_flags; + if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) + thread->sigmask = psf->saved_state.psd_sigmask; + thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; + thread->state = psf->saved_state.psd_state; + thread->flags = psf->saved_state.psd_flags; + thread->interrupted = psf->saved_state.psd_interrupted; + thread->longjmp_val = psf->saved_state.psd_longjmp_val; + thread->signo = psf->saved_state.psd_signo; + thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { - psf->saved_state.psd_interrupted = thread->interrupted; + psf->ctxtype = thread->ctxtype; + memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; - psf->saved_state.psd_state = thread->state; - psf->saved_state.psd_flags = thread->flags; - thread->flags &= PTHREAD_FLAGS_PRIVATE | 
PTHREAD_FLAGS_TRACE | - PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | - PTHREAD_FLAGS_IN_JOINQ; + psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; + psf->saved_state.psd_state = thread->state; + psf->saved_state.psd_flags = thread->flags & + (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); + psf->saved_state.psd_interrupted = thread->interrupted; + psf->saved_state.psd_longjmp_val = thread->longjmp_val; + psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; + psf->saved_state.psd_signo = thread->signo; + psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } #endif Index: head/lib/libc_r/uthread/uthread_sigaction.c =================================================================== --- head/lib/libc_r/uthread/uthread_sigaction.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_sigaction.c (revision 68516) @@ -1,111 +1,111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int _sigaction(int sig, const struct sigaction * act, struct sigaction * oact) { int ret = 0; struct sigaction gact; /* Check if the signal number is out of range: */ if (sig < 1 || sig > NSIG) { /* Return an invalid argument: */ errno = EINVAL; ret = -1; } else { /* * Check if the existing signal action structure contents are * to be returned: */ if (oact != NULL) { /* Return the existing signal action contents: */ oact->sa_handler = _thread_sigact[sig - 1].sa_handler; oact->sa_mask = _thread_sigact[sig - 1].sa_mask; oact->sa_flags = _thread_sigact[sig - 1].sa_flags; } /* Check if a signal action was supplied: */ if (act != NULL) { /* Set the new signal handler: */ _thread_sigact[sig - 1].sa_mask = act->sa_mask; _thread_sigact[sig - 1].sa_flags = act->sa_flags; _thread_sigact[sig - 1].sa_handler = act->sa_handler; } /* * Check if the kernel needs to be advised of a change * in signal action: */ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD && sig != SIGINFO) { /* * Ensure the signal handler cannot be interrupted * by other signals. Always request the POSIX signal * handler arguments. */ sigfillset(&gact.sa_mask); - gact.sa_flags = SA_SIGINFO; + gact.sa_flags = SA_SIGINFO | SA_ONSTACK; /* * Check if the signal handler is being set to * the default or ignore handlers: */ if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN) /* Specify the built in handler: */ gact.sa_handler = act->sa_handler; else /* * Specify the thread kernel signal * handler: */ gact.sa_handler = (void (*) ()) _thread_sig_handler; /* Change the signal action in the kernel: */ if (_thread_sys_sigaction(sig,&gact,NULL) != 0) ret = -1; } } /* Return the completion status: */ return (ret); } __strong_reference(_sigaction, sigaction); #endif Index: head/lib/libc_r/uthread/uthread_sigmask.c =================================================================== --- head/lib/libc_r/uthread/uthread_sigmask.c (revision 68515) +++ head/lib/libc_r/uthread/uthread_sigmask.c (revision 68516) @@ -1,102 +1,105 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
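
[Note: _sigaction above never hands the application's handler directly to the kernel; it installs the library's _thread_sig_handler with a fully blocked sa_mask and, after this change, SA_ONSTACK as well. The same installation pattern for an arbitrary wrapper, using only the public sigaction(2) API:]

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
wrapper_handler(int sig, siginfo_t *info, void *uc)
{
	(void)sig; (void)info; (void)uc;
	write(STDOUT_FILENO, "wrapped\n", 8);
}

int
main(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);	/* no nesting inside the wrapper */
	act.sa_flags = SA_SIGINFO | SA_ONSTACK;
	act.sa_sigaction = wrapper_handler;
	if (sigaction(SIGUSR1, &act, NULL) != 0) {
		perror("sigaction");
		return (1);
	}
	raise(SIGUSR1);
	return (0);
}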
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) { sigset_t sigset; int ret = 0; /* Check if the existing signal process mask is to be returned: */ if (oset != NULL) { /* Return the current mask: */ *oset = _thread_run->sigmask; } /* Check if a new signal set was provided by the caller: */ if (set != NULL) { /* Process according to what to do: */ switch (how) { /* Block signals: */ case SIG_BLOCK: /* Add signals to the existing mask: */ SIGSETOR(_thread_run->sigmask, *set); break; /* Unblock signals: */ case SIG_UNBLOCK: /* Clear signals from the existing mask: */ SIGSETNAND(_thread_run->sigmask, *set); break; /* Set the signal process mask: */ case SIG_SETMASK: /* Set the new mask: */ _thread_run->sigmask = *set; break; /* Trap invalid actions: */ default: /* Return an invalid argument: */ errno = EINVAL; ret = -1; break; } + /* Increment the sequence number: */ + _thread_run->sigmask_seqno++; + /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = _thread_run->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, _thread_run->sigmask); if (SIGNOTEMPTY(sigset)) /* * Call the kernel scheduler which will safely * install a signal frame for the running thread: */ _thread_kern_sched_sig(); } /* Return the completion status: */ return (ret); } #endif Index: head/lib/libkse/thread/thr_cond.c =================================================================== --- head/lib/libkse/thread/thr_cond.c (revision 68515) +++ head/lib/libkse/thread/thr_cond.c (revision 68516) @@ -1,696 +1,755 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
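
[Note: the pthread_sigmask implementation above maps SIG_BLOCK to a set union, SIG_UNBLOCK to a set subtraction, and SIG_SETMASK to plain assignment on the calling thread's mask, then rechecks for newly deliverable pending signals. From the caller's side:]

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	pthread_sigmask(SIG_BLOCK, &set, &old);	  /* mask |= set */
	printf("SIGUSR1 now blocked\n");

	pthread_sigmask(SIG_UNBLOCK, &set, NULL); /* mask &= ~set */
	printf("SIGUSR1 unblocked again\n");

	pthread_sigmask(SIG_SETMASK, &old, NULL); /* mask = old */
	return (0);
}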
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); /* Reinitialize a condition variable to defaults. */ int -_cond_reinit(pthread_cond_t * cond) +_cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; + (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int -pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) +pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; + pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int -pthread_cond_destroy(pthread_cond_t * cond) +pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int -pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) +pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (cond == NULL) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ - else if (*cond != NULL || - (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && + (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
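For illustration (nothing here is from the patch): in this library a pthread_cond_t is a pointer, PTHREAD_COND_INITIALIZER leaves it NULL, and the first waiter allocates the structure, which is the static-initialization path checked at the top of pthread_cond_wait() above. A minimal caller-side sketch, assuming the usual predicate loop:

    #include <pthread.h>

    static pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER; /* NULL until first use */
    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static int ready;

    static void
    wait_for_ready(void)
    {
        pthread_mutex_lock(&mtx);
        while (!ready)                     /* guard against spurious wakeups */
            pthread_cond_wait(&cv, &mtx);  /* first call runs pthread_cond_init() */
        pthread_mutex_unlock(&mtx);
    }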
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Wait forever: */ _thread_run->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); - if (_thread_run->interrupted != 0) { - /* - * Remember that this thread - * was interrupted: - */ - interrupted = 1; + done = (seqno != (*cond)->c_seqno); + if ((_thread_run->flags & + PTHREAD_FLAGS_IN_CONDQ) != 0) { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); } /* + * Save the interrupted flag; locking + * the mutex will destroy it. + */ + interrupted = _thread_run->interrupted; + + /* * Note that even though this thread may have * been canceled, POSIX requires that the mutex * be reaquired prior to cancellation. */ rval = _mutex_cv_lock(mutex); } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization. 
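A sketch of the mechanism just added, not part of the patch itself: the new do/while in pthread_cond_wait() turns on c_seqno. The waiter samples the counter before blocking; pthread_cond_signal() and pthread_cond_broadcast() (changed later in this diff) increment it, so a thread that was backed out of the queue to run a signal handler can tell a real wakeup (counter moved) from a mere interruption (counter unchanged, requeue and wait again). Stripped-down form of the idea, with hypothetical requeue_self()/block_on() helpers:

    struct cv {
        long    seqno;              /* bumped by every signal/broadcast */
        /* ... waiter queue, spinlock ... */
    };

    void requeue_self(struct cv *);  /* hypothetical: enter the wait queue */
    void block_on(struct cv *);      /* hypothetical: may be backed out
                                        early to run a signal handler */
    static void
    cv_wait(struct cv *cv)
    {
        long seqno;
        int  done;

        do {
            seqno = cv->seqno;      /* sample before blocking */
            requeue_self(cv);
            block_on(cv);
            /*
             * Counter moved: someone really signalled; we are done.
             * Counter unchanged: we were only interrupted, so loop
             * and wait again without unlocking the mutex a second time.
             */
            done = (seqno != cv->seqno);
        } while (!done);
    }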
*/ - else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ _thread_run->wakeup_time.tv_sec = abstime->tv_sec; _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); + done = (seqno != (*cond)->c_seqno); + /* - * Check if the wait timedout or was - * interrupted (canceled): + * Check if the wait timed out, was + * interrupted (canceled), or needs to + * be resumed after handling a signal.
*/ if ((_thread_run->timeout == 0) && - (_thread_run->interrupted == 0)) { + (_thread_run->interrupted == 0) && + (done != 0)) { /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); - } else { - /* - * Remember if this thread was - * interrupted: - */ - interrupted = _thread_run->interrupted; - - /* Lock the condition variable structure: */ + /* Lock the CV structure: */ _SPINLOCK(&(*cond)->lock); /* * The wait timed out; remove * the thread from the condition - * variable queue: + * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; - /* Unock the condition variable structure: */ + /* Unlock the CV structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return a timeout error: */ - rval = ETIMEDOUT; + if (_thread_run->timeout != 0) + rval = ETIMEDOUT; + /* + * Save the interrupted flag; + * locking the mutex will + * destroy it. + */ + interrupted = _thread_run->interrupted; /* * Lock the mutex and ignore any * errors. Note that even though * this thread may have been * canceled, POSIX requires that * the mutex be reaquired prior * to cancellation. */ (void)_mutex_cv_lock(mutex); } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL) == 0)) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: + /* Increment the sequence number: */ + (*cond)->c_seqno++; + if ((pthread = cond_queue_deq(*cond)) != NULL) { /* * Unless the thread is currently suspended, * allow it to run. If the thread is suspended, * make a note that the thread isn't in a wait * queue any more. */ if (pthread->state != PS_SUSPENDED) PTHREAD_NEW_STATE(pthread,PS_RUNNING); else pthread->suspended = SUSP_NOWAIT; } /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } int pthread_cond_broadcast(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization.
*/ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL) == 0)) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: + /* Increment the sequence number: */ + (*cond)->c_seqno++; + /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* * Unless the thread is currently suspended, * allow it to run. If the thread is suspended, * make a note that the thread isn't in a wait * queue any more. */ if (pthread->state != PS_SUSPENDED) PTHREAD_NEW_STATE(pthread,PS_RUNNING); else pthread->suspended = SUSP_NOWAIT; } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. */ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. 
*/ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } #endif Index: head/lib/libkse/thread/thr_create.c =================================================================== --- head/lib/libkse/thread/thr_create.c (revision 68515) +++ head/lib/libkse/thread/thr_create.c (revision 68516) @@ -1,324 +1,327 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
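For reference (not library code): cond_queue_enq() above keeps the waiter queue sorted by active priority, highest first, with a fast path that turns the common all-equal-priorities case into a plain tail insert. The same pattern in self-contained form using <sys/queue.h>, with a simplified element type in place of struct pthread:

    #include <sys/queue.h>

    struct waiter {
        int                 prio;
        TAILQ_ENTRY(waiter) link;
    };
    TAILQ_HEAD(waitq, waiter);

    static void
    enq_by_prio(struct waitq *q, struct waiter *w)
    {
        struct waiter *last = TAILQ_LAST(q, waitq);

        /* Common case: equal priorities degenerate to a tail insert. */
        if (last == NULL || w->prio <= last->prio)
            TAILQ_INSERT_TAIL(q, w, link);
        else {
            struct waiter *it = TAILQ_FIRST(q);

            /* Safe: w->prio > last->prio guarantees termination. */
            while (w->prio <= it->prio)
                it = TAILQ_NEXT(it, link);
            TAILQ_INSERT_BEFORE(it, w, link);
        }
    }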
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #include "libc_private.h" static u_int64_t next_uniqueid = 1; #define OFF(f) offsetof(struct pthread, f) -#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f) int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); -int _thread_curframe_offset = OFF(curframe); -int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx); -int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype); +int _thread_ctxtype_offset = OFF(ctxtype); +int _thread_ctx_offset = OFF(ctx); #undef OFF -#undef SIGFRAME_OFF int _thread_PS_RUNNING_value = PS_RUNNING; int _thread_PS_DEAD_value = PS_DEAD; int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG; int _thread_CTX_JB_value = CTX_JB; int _thread_CTX_SJB_value = CTX_SJB; int _thread_CTX_UC_value = CTX_UC; -int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame); int pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { + struct itimerval itimer; int f_gc = 0; int ret = 0; pthread_t gc_thread; pthread_t new_thread; pthread_attr_t pattr; void *stack; /* * Locking functions in libc are required when there are * threads other than the initial thread. */ __isthreaded = 1; /* Allocate memory for the thread structure: */ if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) { /* Use the default thread attributes: */ pattr = &pthread_attr_default; } else { pattr = *attr; } /* Check if a stack was specified in the thread attributes: */ if ((stack = pattr->stackaddr_attr) != NULL) { } /* Allocate memory for a default-size stack: */ else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) { struct stack *spare_stack; /* Allocate or re-use a default-size stack. */ /* * Use the garbage collector mutex for synchronization * of the spare stack list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) { /* Use the spare stack. */ SLIST_REMOVE_HEAD(&_stackq, qe); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); stack = sizeof(struct stack) + (void *) spare_stack - PTHREAD_STACK_DEFAULT; } else { /* Allocate a new stack. */ stack = _next_stack + PTHREAD_STACK_GUARD; - + /* * Even if stack allocation fails, we don't want * to try to use this location again, so * unconditionally decrement _next_stack. Under * normal operating conditions, the most likely * reason for an mmap() error is a stack * overflow of the adjacent thread stack. */ _next_stack -= (PTHREAD_STACK_DEFAULT + PTHREAD_STACK_GUARD); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Stack: */ if (mmap(stack, PTHREAD_STACK_DEFAULT, PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED) { ret = EAGAIN; free(new_thread); } } } /* * The user wants a stack of a particular size. Lets hope they * really know what they want, and simply malloc the stack. 
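An illustrative condensation, not from the patch: the stack handling in pthread_create() first recycles a cached default-size stack from the _stackq free list (the struct stack bookkeeping lives at the top of each region, hence the pointer arithmetic), and otherwise carves a new stack downward from _next_stack with a guard gap between neighbours. In this sketch the constants, MAP_FIXED, and the initialization of next_stack are stand-ins, and the real code uses MAP_STACK:

    #include <sys/mman.h>
    #include <sys/queue.h>

    #define STACK_SIZE  0x10000     /* stand-in for PTHREAD_STACK_DEFAULT */
    #define GUARD_SIZE  0x1000      /* stand-in for PTHREAD_STACK_GUARD */

    struct stack { SLIST_ENTRY(stack) qe; };
    static SLIST_HEAD(, stack) stackq = SLIST_HEAD_INITIALIZER(stackq);
    static char *next_stack;        /* assumed initialized elsewhere */

    static void *
    stack_alloc(void)
    {
        struct stack *sp;
        void *stk;

        if ((sp = SLIST_FIRST(&stackq)) != NULL) {
            /* Reuse a cached stack; sp sits at the top of the region. */
            SLIST_REMOVE_HEAD(&stackq, qe);
            return ((char *)(sp + 1) - STACK_SIZE);
        }
        /* Carve a new stack below the previous one, past a guard gap. */
        stk = next_stack + GUARD_SIZE;
        /* Decrement unconditionally, even if the mmap() below fails. */
        next_stack -= STACK_SIZE + GUARD_SIZE;
        if (mmap(stk, STACK_SIZE, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED)
            return (NULL);
        return (stk);
    }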
*/ else if ((stack = (void *) malloc(pattr->stacksize_attr)) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; free(new_thread); } /* Check for errors: */ if (ret != 0) { } else { /* Initialise the thread structure: */ memset(new_thread, 0, sizeof(struct pthread)); new_thread->slice_usec = -1; new_thread->stack = stack; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = PTHREAD_MAGIC; /* Initialise the thread for signals: */ new_thread->sigmask = _thread_run->sigmask; + new_thread->sigmask_seqno = 0; - /* Initialize the first signal frame: */ - new_thread->sigframes[0] = &new_thread->sigframe0; - new_thread->curframe = &new_thread->sigframe0; + /* Initialize the signal frame: */ + new_thread->curframe = NULL; /* Initialise the jump buffer: */ - _setjmp(new_thread->curframe->ctx.jb); + _setjmp(new_thread->ctx.jb); /* * Set up new stack frame so that it looks like it * returned from a longjmp() to the beginning of * _thread_start(). */ - SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb, - _thread_start); + SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start); /* The stack starts high and builds down: */ - SET_STACK_JB(new_thread->curframe->ctx.jb, + SET_STACK_JB(new_thread->ctx.jb, (long)new_thread->stack + pattr->stacksize_attr - sizeof(double)); /* Initialize the rest of the frame: */ - new_thread->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - new_thread->curframe->stackp = - GET_STACK_JB(new_thread->curframe->ctx.jb); - new_thread->sigframe_count = 0; + new_thread->ctxtype = CTX_JB_NOSIG; /* Copy the thread attributes: */ memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr)); /* * Check if this thread is to inherit the scheduling - * attributes from its parent: + * attributes from its parent: */ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) { /* Copy the scheduling attributes: */ new_thread->base_priority = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.prio = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.sched_policy = _thread_run->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their - * default values: + * default values: */ new_thread->base_priority = new_thread->attr.prio; } new_thread->active_priority = new_thread->base_priority; new_thread->inherited_priority = 0; /* Initialise the join queue for the new thread: */ TAILQ_INIT(&(new_thread->join_queue)); /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); /* Initialise hooks in the thread structure: */ new_thread->specific_data = NULL; new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->poll_data.nfds = 0; new_thread->poll_data.fds = NULL; new_thread->continuation = NULL; /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* * Initialise the unique id which GDB uses to * track threads. */ new_thread->uniqueid = next_uniqueid++; /* * Check if the garbage collector thread * needs to be started. 
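As a caller-side illustration (portable POSIX, values invented): the PTHREAD_INHERIT_SCHED branch above is the library half of the standard inherit-scheduling attribute; the application chooses between inheriting and explicit settings like this:

    #include <pthread.h>
    #include <sched.h>

    static int
    make_rr_attr(pthread_attr_t *attr)
    {
        struct sched_param param;

        pthread_attr_init(attr);
        /* Take scheduling from the attribute, not from the creator: */
        pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(attr, SCHED_RR);
        param.sched_priority = 15;      /* illustrative value */
        return (pthread_attr_setschedparam(attr, &param));
    }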
*/ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial); /* Add the thread to the linked list of all threads: */ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle); if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) new_thread->state = PS_SUSPENDED; else { new_thread->state = PS_RUNNING; PTHREAD_PRIOQ_INSERT_TAIL(new_thread); } /* * Undefer and handle pending signals, yielding * if necessary. */ _thread_kern_sig_undefer(); /* Return a pointer to the thread structure: */ (*thread) = new_thread; + if (f_gc != 0) { + /* Install the scheduling timer: */ + itimer.it_interval.tv_sec = 0; + itimer.it_interval.tv_usec = _clock_res_usec; + itimer.it_value = itimer.it_interval; + if (setitimer(_ITIMER_SCHED_TIMER, &itimer, + NULL) != 0) + PANIC("Cannot set interval timer"); + } + /* Schedule the new user thread: */ _thread_kern_sched(NULL); + /* * Start a garbage collector thread * if necessary. */ if (f_gc && pthread_create(&gc_thread,NULL, _thread_gc,NULL) != 0) PANIC("Can't create gc thread"); } } /* Return the status: */ return (ret); } void _thread_start(void) { /* We just left the scheduler via longjmp: */ _thread_kern_in_sched = 0; /* Run the current thread's start routine with argument: */ pthread_exit(_thread_run->start_routine(_thread_run->arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } #endif Index: head/lib/libkse/thread/thr_detach.c =================================================================== --- head/lib/libkse/thread/thr_detach.c (revision 68515) +++ head/lib/libkse/thread/thr_detach.c (revision 68516) @@ -1,83 +1,88 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
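A note on the hunk above, with an illustrative rendering that is not itself part of the patch: the scheduling interval timer is now armed inside pthread_create() the first time the process gains a second thread (the same f_gc test that spawns the garbage collector), instead of unconditionally in _thread_init() (the matching removal appears in thr_init.c below). The shape of the timer setup, with ITIMER_VIRTUAL standing in for _ITIMER_SCHED_TIMER and 10000 us for _clock_res_usec:

    #include <sys/time.h>
    #include <stdlib.h>

    static void
    start_quantum_timer(void)
    {
        struct itimerval it;

        it.it_interval.tv_sec = 0;
        it.it_interval.tv_usec = 10000; /* stand-in for _clock_res_usec */
        it.it_value = it.it_interval;   /* fire once, then every tick */
        if (setitimer(ITIMER_VIRTUAL, &it, NULL) != 0)
            abort();                    /* the library PANICs instead */
    }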
* * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_detach(pthread_t pthread) { int rval = 0; pthread_t next_thread; /* Check for invalid calling parameters: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) /* Return an invalid argument error: */ rval = EINVAL; /* Check if the thread has not been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Enter a loop to bring all threads off the join queue: */ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* Make the thread runnable: */ - PTHREAD_NEW_STATE(next_thread,PS_RUNNING); + PTHREAD_NEW_STATE(next_thread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + next_thread->error = ESRCH; } /* * Undefer and handle pending signals, yielding if a * scheduling signal occurred while in the critical region. */ _thread_kern_sig_undefer(); } else /* Return an error: */ rval = EINVAL; /* Return the completion status: */ return (rval); } #endif Index: head/lib/libkse/thread/thr_exit.c =================================================================== --- head/lib/libkse/thread/thr_exit.c (revision 68515) +++ head/lib/libkse/thread/thr_exit.c (revision 68516) @@ -1,244 +1,237 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
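The caller-visible effect of the thr_detach.c hunk above, as a hypothetical program (not from the patch): pthread_detach() now wakes any thread already blocked in pthread_join() on the target and hands it ESRCH, since a detached thread's exit status can never be collected:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_t target;

    static void *spin(void *a) { for (;;) sleep(1); return (a); }

    static void *
    joiner(void *a)
    {
        void *res;

        /* Blocks in PS_JOIN; the detach below wakes it with ESRCH. */
        return ((void *)(long)pthread_join(target, &res));
    }

    int
    main(void)
    {
        pthread_t j;

        pthread_create(&target, NULL, spin, NULL);
        pthread_create(&j, NULL, joiner, NULL);
        sleep(1);                   /* let the joiner block first */
        pthread_detach(target);     /* joiner's pthread_join() returns ESRCH */
        pthread_join(j, NULL);
        return (0);
    }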
* * $FreeBSD$ */ #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) void __exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); _thread_sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = _thread_sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ _thread_sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ _thread_sys__exit(status); } __strong_reference(__exit, _exit); void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ strcpy(s, "Fatal error '"); strcat(s, string); strcat(s, "' at line ? "); strcat(s, "in file "); strcat(s, fname); strcat(s, " (errno = ?"); strcat(s, ")\n"); /* Write the string to the standard error file descriptor: */ _thread_sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else _exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. */ void _thread_exit_cleanup(void) { /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(_thread_run); /* Unlock all owned file locks: */ _funlock_owned(_thread_run); /* Unlock all private mutexes: */ _mutex_unlock_private(_thread_run); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void pthread_exit(void *status) { - int frame; + pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((_thread_run->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. 
POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run); PANIC(msg); } /* Flag this thread as exiting: */ _thread_run->flags |= PTHREAD_EXITING; /* Save the return value: */ _thread_run->ret = status; while (_thread_run->cleanup != NULL) { pthread_cleanup_pop(1); } - if (_thread_run->attr.cleanup_attr != NULL) { _thread_run->attr.cleanup_attr(_thread_run->attr.arg_attr); } /* Check if there is thread specific data: */ if (_thread_run->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (_thread_run->poll_data.fds != NULL) { free(_thread_run->poll_data.fds); _thread_run->poll_data.fds = NULL; } - if ((frame = _thread_run->sigframe_count) == 0) - _thread_exit_finish(); - else { - /* - * Jump back and unwind the signal frames to gracefully - * cleanup. - */ - ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1); - } - - /* This point should not be reached. */ - PANIC("Dead thread has resumed"); -} - -void -_thread_exit_finish(void) -{ - pthread_t pthread; - /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. */ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + if ((_thread_run->attr.flags & PTHREAD_DETACHED) != 0) + pthread->error = ESRCH; + else { + pthread->ret = _thread_run->ret; + pthread->error = 0; + } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, _thread_run, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); + + /* This point should not be reached. */ + PANIC("Dead thread has resumed"); } #endif Index: head/lib/libkse/thread/thr_info.c =================================================================== --- head/lib/libkse/thread/thr_info.c (revision 68515) +++ head/lib/libkse/thread/thr_info.c (revision 68516) @@ -1,315 +1,290 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" +#ifndef NELEMENTS +#define NELEMENTS(arr) (sizeof(arr) / sizeof(arr[0])) +#endif + +static void dump_thread(int fd, pthread_t pthread, int long_version); + + struct s_thread_info { enum pthread_state state; char *name; }; /* Static variables: */ static const struct s_thread_info thread_info[] = { {PS_RUNNING , "Running"}, {PS_SIGTHREAD , "Waiting on signal thread"}, {PS_MUTEX_WAIT , "Waiting on a mutex"}, {PS_COND_WAIT , "Waiting on a condition variable"}, {PS_FDLR_WAIT , "Waiting for a file read lock"}, {PS_FDLW_WAIT , "Waiting for a file write lock"}, {PS_FDR_WAIT , "Waiting for read"}, {PS_FDW_WAIT , "Waiting for write"}, {PS_FILE_WAIT , "Waiting for FILE lock"}, {PS_POLL_WAIT , "Waiting on poll"}, {PS_SELECT_WAIT , "Waiting on select"}, {PS_SLEEP_WAIT , "Sleeping"}, {PS_WAIT_WAIT , "Waiting process"}, {PS_SIGSUSPEND , "Suspended, waiting for a signal"}, {PS_SIGWAIT , "Waiting for a signal"}, {PS_SPINBLOCK , "Waiting for a spinlock"}, {PS_JOIN , "Waiting to join"}, {PS_SUSPENDED , "Suspended"}, {PS_DEAD , "Dead"}, {PS_DEADLOCK , "Deadlocked"}, {PS_STATE_MAX , "Not a real state!"} }; void _thread_dump_info(void) { char s[512]; int fd; int i; - int j; pthread_t pthread; char tmpfile[128]; pq_list_t *pq_list; - for (i = 0; i < 100000; i++) { + for (i = 0; i < 100000; i++) { snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i", getpid(), i); /* Open the dump file for append and create it if necessary: */ if ((fd = _thread_sys_open(tmpfile, O_RDWR | O_CREAT | O_EXCL, 0666)) < 0) { /* Can't open the dump file. */ if (errno == EEXIST) continue; /* * We only need to continue in case of * EEXIT error. Most other error * codes means that we will fail all * the times. 
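The dump-file loop in _thread_dump_info() above claims a unique name atomically with O_CREAT|O_EXCL and retries only on EEXIST; every other open() failure is treated as permanent. The pattern in isolation (hypothetical wrapper, not library code):

    #include <sys/types.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <errno.h>

    static int
    open_unique(pid_t pid)
    {
        char name[128];
        int fd, i;

        for (i = 0; i < 100000; i++) {
            snprintf(name, sizeof(name), "/tmp/uthread.dump.%u.%i",
                (unsigned)pid, i);
            fd = open(name, O_RDWR | O_CREAT | O_EXCL, 0666);
            if (fd >= 0)
                return (fd);        /* we created it; the name is ours */
            if (errno != EEXIST)
                return (-1);        /* any other error is fatal */
        }
        return (-1);                /* all candidate names taken */
    }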
*/ return; } else { break; } } if (i==100000) { /* all 100000 possibilities are in use :( */ return; } else { /* Output a header for active threads: */ strcpy(s, "\n\n=============\nACTIVE THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the global list: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); - - /* Check if this is the running thread: */ - if (pthread == _thread_run) { - /* Output a record for the running thread: */ - strcpy(s, "This is the running thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Check if this is the initial thread: */ - if (pthread == _thread_initial) { - /* Output a record for the initial thread: */ - strcpy(s, "This is the initial thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Process according to thread state: */ - switch (pthread->state) { - /* File descriptor read lock wait: */ - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - /* Write the lock details: */ - snprintf(s, sizeof(s), "fd %d[%s:%d]", - pthread->data.fd.fd, - pthread->data.fd.fname, - pthread->data.fd.branch); - _thread_sys_write(fd, s, strlen(s)); - snprintf(s, sizeof(s), "owner %pr/%pw\n", - _thread_fd_table[pthread->data.fd.fd]->r_owner, - _thread_fd_table[pthread->data.fd.fd]->w_owner); - _thread_sys_write(fd, s, strlen(s)); - break; - case PS_SIGWAIT: - snprintf(s, sizeof(s), "sigmask (hi)"); - _thread_sys_write(fd, s, strlen(s)); - for (i = _SIG_WORDS - 1; i >= 0; i--) { - snprintf(s, sizeof(s), "%08x\n", - pthread->sigmask.__bits[i]); - _thread_sys_write(fd, s, strlen(s)); - } - snprintf(s, sizeof(s), "(lo)\n"); - _thread_sys_write(fd, s, strlen(s)); - break; - - /* - * Trap other states that are not explicitly - * coded to dump information: - */ - default: - /* Nothing to do here. */ - break; - } + dump_thread(fd, pthread, /*long_version*/ 1); } /* Output a header for ready threads: */ strcpy(s, "\n\n=============\nREADY THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the ready queue: */ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) { TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ?
- "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for waiting threads: */ strcpy(s, "\n\n=============\nWAITING THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_waitingq, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Output a header for threads in the work queue: */ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_workq, qe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Check if there are no dead threads: */ if (TAILQ_FIRST(&_dead_list) == NULL) { /* Output a record: */ strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n"); _thread_sys_write(fd, s, strlen(s)); } else { /* Output a header for dead threads: */ strcpy(s, "\n\nDEAD THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* * Enter a loop to report each thread in the global - * dead thread list: + * dead thread list: */ TAILQ_FOREACH(pthread, &_dead_list, dle) { - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "Thread %p prio %3d [%s:%d]\n", - pthread, pthread->base_priority, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for file descriptors: */ - snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR TABLE (table size %d)\n\n",_thread_dtablesize); + snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR " + "TABLE (table size %d)\n\n", _thread_dtablesize); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report file descriptor lock usage: */ for (i = 0; i < _thread_dtablesize; i++) { /* * Check if memory is allocated for this file - * descriptor: + * descriptor: */ if (_thread_fd_table[i] != NULL) { /* Report the file descriptor lock status: */ snprintf(s, sizeof(s), - "fd[%3d] read owner %p count %d [%s:%d]\n write owner %p count %d [%s:%d]\n", - i, - _thread_fd_table[i]->r_owner, - _thread_fd_table[i]->r_lockcount, - _thread_fd_table[i]->r_fname, - _thread_fd_table[i]->r_lineno, - _thread_fd_table[i]->w_owner, - _thread_fd_table[i]->w_lockcount, - _thread_fd_table[i]->w_fname, - _thread_fd_table[i]->w_lineno); - _thread_sys_write(fd, s, strlen(s)); + "fd[%3d] read owner %p count %d [%s:%d]\n" + " write owner %p count %d [%s:%d]\n", + i, 
_thread_fd_table[i]->r_owner, + _thread_fd_table[i]->r_lockcount, + _thread_fd_table[i]->r_fname, + _thread_fd_table[i]->r_lineno, + _thread_fd_table[i]->w_owner, + _thread_fd_table[i]->w_lockcount, + _thread_fd_table[i]->w_fname, + _thread_fd_table[i]->w_lineno); + _thread_sys_write(fd, s, strlen(s)); } } /* Close the dump file: */ _thread_sys_close(fd); + } +} + +static void +dump_thread(int fd, pthread_t pthread, int long_version) +{ + char s[512]; + int i; + + /* Find the state: */ + for (i = 0; i < NELEMENTS(thread_info) - 1; i++) + if (thread_info[i].state == pthread->state) + break; + + /* Output a record for the thread: */ + snprintf(s, sizeof(s), + "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", + pthread, (pthread->name == NULL) ? "" : pthread->name, + pthread->active_priority, thread_info[i].name, pthread->fname, + pthread->lineno); + _thread_sys_write(fd, s, strlen(s)); + + if (long_version != 0) { + /* Check if this is the running thread: */ + if (pthread == _thread_run) { + /* Output a record for the running thread: */ + strcpy(s, "This is the running thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Check if this is the initial thread: */ + if (pthread == _thread_initial) { + /* Output a record for the initial thread: */ + strcpy(s, "This is the initial thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Process according to thread state: */ + switch (pthread->state) { + /* File descriptor read lock wait: */ + case PS_FDLR_WAIT: + case PS_FDLW_WAIT: + case PS_FDR_WAIT: + case PS_FDW_WAIT: + /* Write the lock details: */ + snprintf(s, sizeof(s), "fd %d[%s:%d]", + pthread->data.fd.fd, + pthread->data.fd.fname, + pthread->data.fd.branch); + _thread_sys_write(fd, s, strlen(s)); + snprintf(s, sizeof(s), "owner %pr/%pw\n", + _thread_fd_table[pthread->data.fd.fd]->r_owner, + _thread_fd_table[pthread->data.fd.fd]->w_owner); + _thread_sys_write(fd, s, strlen(s)); + break; + case PS_SIGWAIT: + snprintf(s, sizeof(s), "sigmask (hi)"); + _thread_sys_write(fd, s, strlen(s)); + for (i = _SIG_WORDS - 1; i >= 0; i--) { + snprintf(s, sizeof(s), "%08x\n", + pthread->sigmask.__bits[i]); + _thread_sys_write(fd, s, strlen(s)); + } + snprintf(s, sizeof(s), "(lo)\n"); + _thread_sys_write(fd, s, strlen(s)); + break; + /* + * Trap other states that are not explicitly + * coded to dump information: + */ + default: + /* Nothing to do here. */ + break; + } } } /* Set the thread name for debug: */ void pthread_set_name_np(pthread_t thread, char *name) { /* Check if the caller has specified a valid thread: */ if (thread != NULL && thread->magic == PTHREAD_MAGIC) { if (thread->name != NULL) { /* Free space for previous name. */ free(thread->name); } thread->name = strdup(name); } } #endif Index: head/lib/libkse/thread/thr_init.c =================================================================== --- head/lib/libkse/thread/thr_init.c (revision 68515) +++ head/lib/libkse/thread/thr_init.c (revision 68516) @@ -1,425 +1,421 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #ifdef GCC_2_8_MADE_THREAD_AWARE typedef void *** (*dynamic_handler_allocator)(); extern void __set_dynamic_handler_allocator(dynamic_handler_allocator); static pthread_key_t except_head_key; typedef struct { void **__dynamic_handler_chain; void *top_elt[2]; } except_struct; static void ***dynamic_allocator_handler_fn() { except_struct *dh = (except_struct *)pthread_getspecific(except_head_key); if(dh == NULL) { dh = (except_struct *)malloc( sizeof(except_struct) ); memset(dh, '\0', sizeof(except_struct)); dh->__dynamic_handler_chain= dh->top_elt; pthread_setspecific(except_head_key, (void *)dh); } return &dh->__dynamic_handler_chain; } #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; struct clockinfo clockinfo; struct sigaction act; - struct itimerval itimer; + struct sigaltstack alt; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = _thread_sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (_thread_sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); if (_thread_sys_dup2(fd,0) == -1 || _thread_sys_dup2(fd,1) == -1 || _thread_sys_dup2(fd,2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them : */ for (i = 0; i < 3; i++) if (((_pthread_stdio_flags[i] = _thread_sys_fcntl(i,F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); /* * Create a pipe that is written to by the signal handler to prevent - * signals being missed in calls to _select: + * signals being missed in calls to _select: */ if (_thread_sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } /* Get the flags for the read pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so - * abort: + * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ - else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL) + else if ((_thread_kern_sched_stack = malloc(SCHED_STACK_SIZE)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Initialize the thread stack cache: */ SLIST_INIT(&_stackq); /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the paramters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond. 
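A generic rendering of the red-zone idea described in this comment (not the patch's code, which passes prot 0 and relies on the hint address rather than MAP_FIXED): the guard region below the initial thread's stack is mapped with no access rights, so the first push beyond the stack faults immediately instead of silently overwriting the neighbouring thread stack:

    #include <sys/mman.h>
    #include <stddef.h>

    /*
     * Reserve 'guard' bytes ending at 'top' that can never be read,
     * written, or executed; any touch delivers SIGSEGV/SIGBUS.
     */
    static int
    install_red_zone(char *top, size_t guard)
    {
        if (mmap(top - guard, guard, PROT_NONE,
            MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED)
            return (-1);
        return (0);
    }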
*/ if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = (void *) USRSTACK - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); - SET_STACK_JB(_thread_kern_sched_jb, - _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double)); + SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + + SCHED_STACK_SIZE - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Initialise the queue: */ TAILQ_INIT(&(_thread_initial->join_queue)); /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; - /* Initialize the initial signal frame: */ - _thread_initial->sigframes[0] = &_thread_initial->sigframe0; - _thread_initial->curframe = &_thread_initial->sigframe0; - _thread_initial->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - _thread_initial->curframe->stackp = (unsigned long) USRSTACK; + /* Initialize the initial context: */ + _thread_initial->curframe = NULL; + _thread_initial->ctxtype = CTX_JB_NOSIG; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; _thread_initial->specific_data = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _thread_run = _thread_initial; /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; - act.sa_flags = SA_SIGINFO; + act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); + /* Create and install an alternate signal stack: */ + alt.ss_sp = malloc(SIGSTKSZ); /* recommended stack size */ + alt.ss_size = SIGSTKSZ; + alt.ss_flags = 0; + if (_thread_sys_sigaltstack(&alt, NULL) != 0) + PANIC("Unable to install alternate signal stack"); + /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (_thread_sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal - * 
initialisation fails: + * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 || _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* - * Abort this process if signal initialisation fails: + * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort - * this process. + * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor - * table: + * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } - - /* Install the scheduling timer: */ - itimer.it_interval.tv_sec = 0; - itimer.it_interval.tv_usec = _clock_res_usec; - itimer.it_value = itimer.it_interval; - if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) - PANIC("Cannot set interval timer"); - } } #ifdef GCC_2_8_MADE_THREAD_AWARE /* Create the thread-specific data for the exception linked list. */ if(pthread_key_create(&except_head_key, NULL) != 0) PANIC("Failed to create thread specific execption head"); /* Setup the gcc exception handler per thread. */ __set_dynamic_handler_allocator( dynamic_allocator_handler_fn ); #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* Initialise the garbage collector mutex and condition variable. 
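The SA_ONSTACK flag added above only has an effect because the commit also installs an alternate signal stack first. A minimal, self-contained sketch of that pairing, using only the standard sigaltstack(2) and sigaction(2) interfaces; the handler and the choice of SIGUSR1 are illustrative, not the library's (libc_r installs _thread_sig_handler on its scheduling signals):

#include <signal.h>
#include <stdlib.h>

/* Illustrative handler; stands in for _thread_sig_handler. */
static void
handler(int sig, siginfo_t *info, void *ucp)
{
        (void)sig; (void)info; (void)ucp;
}

static int
install_alt_stack(void)
{
        stack_t alt;
        struct sigaction act;

        /* Create and install an alternate signal stack: */
        alt.ss_sp = malloc(SIGSTKSZ);   /* recommended stack size */
        alt.ss_size = SIGSTKSZ;
        alt.ss_flags = 0;
        if (alt.ss_sp == NULL || sigaltstack(&alt, NULL) != 0)
                return (-1);

        /* Deliver the signal on that stack, with siginfo: */
        sigfillset(&act.sa_mask);
        act.sa_sigaction = handler;
        act.sa_flags = SA_SIGINFO | SA_ONSTACK;
        return (sigaction(SIGUSR1, &act, NULL));
}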
*/ if (pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* - * Special start up code for NetBSD/Alpha + * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) -int +int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif #else /* * A stub for non-threaded programs. */ void _thread_init(void) { } #endif Index: head/lib/libkse/thread/thr_join.c =================================================================== --- head/lib/libkse/thread/thr_join.c (revision 68515) +++ head/lib/libkse/thread/thr_join.c (revision 68516) @@ -1,138 +1,166 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_join(pthread_t pthread, void **thread_return) { int ret = 0; _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) { /* Invalid thread: */ _thread_leave_cancellation_point(); return(EINVAL); } /* Check if the caller has specified itself: */ if (pthread == _thread_run) { /* Avoid a deadlock condition: */ _thread_leave_cancellation_point(); return(EDEADLK); } /* * Find the thread in the list of active threads or in the * list of dead threads: */ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0)) /* Return an error: */ ret = ESRCH; /* Check if this thread has been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) /* Return an error: */ ret = ESRCH; /* Check if the thread is not dead: */ else if (pthread->state != PS_DEAD) { PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run); - /* Clear the interrupted flag: */ - _thread_run->interrupted = 0; - /* - * Protect against being context switched out while - * adding this thread to the join queue. + * Enter a loop in case this thread is woken prematurely + * in order to invoke a signal handler: */ - _thread_kern_sig_defer(); + for (;;) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; - /* Add the running thread to the join queue: */ - TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; - _thread_run->data.thread = pthread; + /* + * Protect against being context switched out while + * adding this thread to the join queue. + */ + _thread_kern_sig_defer(); - /* Schedule the next thread: */ - _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); + /* Add the running thread to the join queue: */ + TAILQ_INSERT_TAIL(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; + _thread_run->data.thread = pthread; - if (_thread_run->interrupted != 0) { - TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; - } - _thread_run->data.thread = NULL; + /* Schedule the next thread: */ + _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); - _thread_kern_sig_undefer(); + if ((_thread_run->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { + TAILQ_REMOVE(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; + } + _thread_run->data.thread = NULL; - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation(_thread_run); + _thread_kern_sig_undefer(); - /* Check if the thread is not detached: */ - if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { - /* Check if the return value is required: */ - if (thread_return) - /* Return the thread's return value: */ - *thread_return = pthread->ret; - } - else - /* Return an error: */ - ret = ESRCH; + if (_thread_run->interrupted != 0) { + if (_thread_run->continuation != NULL) + _thread_run->continuation(_thread_run); + /* + * This thread was interrupted, probably to + * invoke a signal handler. Make sure the + * target thread is still joinable. + */ + if (((_find_thread(pthread) != 0) && + (_find_dead_thread(pthread) != 0)) || + ((pthread->attr.flags & + PTHREAD_DETACHED) != 0)) { + /* Return an error: */ + ret = ESRCH; + /* We're done; break out of the loop. */ + break; + } + else if (pthread->state == PS_DEAD) { + /* We're done; break out of the loop. 
*/ + break; + } + } else { + /* + * The thread return value and error are set + * by the thread we're joining to when it + * exits or detaches: + */ + ret = _thread_run->error; + if ((ret == 0) && (thread_return != NULL)) + *thread_return = _thread_run->ret; + + /* We're done; break out of the loop. */ + break; + } + } /* Check if the return value is required: */ } else if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; _thread_leave_cancellation_point(); /* Return the completion status: */ return (ret); } void _join_backout(pthread_t pthread) { _thread_kern_sig_defer(); - if (pthread->state == PS_JOIN) { + if ((pthread->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe); _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; } _thread_kern_sig_undefer(); } #endif Index: head/lib/libkse/thread/thr_kern.c =================================================================== --- head/lib/libkse/thread/thr_kern.c (revision 68515) +++ head/lib/libkse/thread/thr_kern.c (revision 68516) @@ -1,1108 +1,1111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* #define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* Static function prototype definitions: */ -static void +static void thread_kern_poll(int wait_reqd); static void dequeue_signals(void); static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in); /* Static variables: */ static int last_tick = 0; /* * This is called when a signal handler finishes and wants to * return to a previous frame. 
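The join hunk above replaces a single wait with a retry loop: a signal handler can pull the joining thread out of the join queue, so after the handler runs the join must be re-armed and the target re-validated. A condensed sketch of that pattern follows; every type and helper in it is a hypothetical stand-in, not a libc_r symbol. (Separately, the new _join_backout() removes `pthread' from the join queue but clears PTHREAD_FLAGS_IN_JOINQ on _thread_run, which looks like an oversight whenever the two differ.)

#include <errno.h>

/* Hypothetical stand-ins for the libc_r internals, illustration only. */
struct target { int dead; int detached; };
struct waiter { int interrupted; };

static void enqueue_as_waiter(struct target *t, struct waiter *w) { (void)t; (void)w; }
static void block_here(void) { }        /* sleep in PS_JOIN */
static int  still_joinable(struct target *t) { return (!t->detached); }

/* Condensed shape of the new join loop: */
static int
join_loop(struct waiter *self, struct target *t)
{
        for (;;) {
                self->interrupted = 0;
                enqueue_as_waiter(t, self); /* signals deferred around this */
                block_here();               /* wait until woken */
                if (self->interrupted == 0)
                        return (0);     /* normal wakeup: target exited */
                if (!still_joinable(t))
                        return (ESRCH); /* raced with detach or reaping */
                if (t->dead)
                        return (0);     /* target died while handler ran */
                /* Otherwise a handler ran; go around and wait again. */
        }
}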
*/ void -_thread_kern_sched_frame(int frame) +_thread_kern_sched_frame(struct pthread_signal_frame *psf) { /* * Flag the pthread kernel as executing scheduler code * to avoid a signal from interrupting this execution and * corrupting the (soon-to-be) current frame. */ _thread_kern_in_sched = 1; - /* Return to the specified frame: */ - _thread_run->curframe = _thread_run->sigframes[frame]; - _thread_run->sigframe_count = frame; + /* Restore the signal frame: */ + _thread_sigframe_restore(_thread_run, psf); - if (_thread_run->sigframe_count == 0) - /* Restore the threads priority: */ - _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; - /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } void _thread_kern_sched(ucontext_t *scp) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* Check if this function was called from the signal handler: */ if (scp != NULL) { /* * The signal handler should have saved the state of * the current thread. Restore the process signal * mask. */ if (_thread_sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL) != 0) PANIC("Unable to restore process mask after signal"); /* * We're running on the signal stack; just call the * kernel scheduler directly. */ DBG_MSG("Entering scheduler due to signal\n"); _thread_kern_scheduler(); } else { /* Save the state of the current thread: */ - if (_setjmp(_thread_run->curframe->ctx.jb) == 0) { + if (_setjmp(_thread_run->ctx.jb) == 0) { /* Flag the jump buffer was the last state saved: */ - _thread_run->curframe->ctxtype = CTX_JB_NOSIG; - _thread_run->curframe->longjmp_val = 1; + _thread_run->ctxtype = CTX_JB_NOSIG; + _thread_run->longjmp_val = 1; } else { DBG_MSG("Returned from ___longjmp, thread %p\n", _thread_run); /* * This point is reached when a longjmp() is called - * to restore the state of a thread. + * to restore the state of a thread. * * This is the normal way out of the scheduler. */ _thread_kern_in_sched = 0; if (_thread_run->sig_defer_count == 0) { if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) - /* + /* * Cancellations override signals. * * Stick a cancellation point at the * start of each async-cancellable * thread's resumption. * * We allow threads woken at cancel * points to do their own checks. */ pthread_testcancel(); } if (_sched_switch_hook != NULL) { /* Run the installed switch hook: */ thread_run_switch_hook(_last_user_thread, _thread_run); } return; } /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } } void _thread_kern_sched_sig(void) { _thread_run->check_pending = 1; _thread_kern_sched(NULL); } void _thread_kern_scheduler(void) { - struct pthread_signal_frame *psf; struct timespec ts; struct timeval tv; pthread_t pthread, pthread_h; unsigned int current_tick; int add_to_prioq; /* If the currently running thread is a user thread, save it: */ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0) _last_user_thread = _thread_run; /* Are there pending signals for this thread? */ if (_thread_run->check_pending != 0) { _thread_run->check_pending = 0; _thread_sig_check_pending(_thread_run); } /* * Enter a scheduling loop that finds the next thread that is * ready to run. 
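The save/switch sequence in _thread_kern_sched() above is the classic user-level context switch: capture the caller in a jmp_buf, record how the state was saved, and longjmp onto the scheduler's own stack. A toy version using the portable setjmp(3) interfaces; libc_r itself uses the private _setjmp/___longjmp variants, which deliberately leave the signal mask alone:

#include <setjmp.h>

static jmp_buf sched_jb;        /* set up once, on the scheduler's stack */

/* Toy yield: save this thread's state, then jump to the scheduler. */
static void
yield(jmp_buf self_jb)
{
        if (setjmp(self_jb) == 0)
                longjmp(sched_jb, 1);   /* enter the scheduler */
        /* Reaching here means the scheduler longjmp'd back: resume. */
}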
This loop completes when there are no more threads * in the global list or when a thread has its state restored by * either a sigreturn (if the state was saved as a sigcontext) or a - * longjmp (if the state was saved by a setjmp). + * longjmp (if the state was saved by a setjmp). */ while (!(TAILQ_EMPTY(&_thread_list))) { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); current_tick = _sched_ticks; /* * Protect the scheduling queues from access by the signal * handler. */ _queue_signals = 1; add_to_prioq = 0; if (_thread_run != &_thread_kern_thread) { /* * This thread no longer needs to yield the CPU. */ _thread_run->yield_on_sig_undefer = 0; if (_thread_run->state != PS_RUNNING) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive =+ UINT_MAX + 1; } } /* * Place the currently running thread into the * appropriate queue(s). */ switch (_thread_run->state) { case PS_DEAD: case PS_STATE_MAX: /* to silence -Wall */ case PS_SUSPENDED: /* * Dead and suspended threads are not placed * in any queue: */ break; case PS_RUNNING: /* * Runnable threads can't be placed in the * priority queue until after waiting threads * are polled (to preserve round-robin * scheduling). */ add_to_prioq = 1; break; /* * States which do not depend on file descriptor I/O - * operations or timeouts: + * operations or timeouts: */ case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: case PS_SIGSUSPEND: case PS_SIGTHREAD: case PS_SIGWAIT: case PS_WAIT_WAIT: /* No timeouts for these states: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States which can timeout: */ case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States that require periodic work: */ case PS_SPINBLOCK: /* No timeouts for this state: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Increment spinblock count: */ _spinblock_count++; /* FALLTHROUGH */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); /* Insert into the work queue: */ PTHREAD_WORKQ_INSERT(_thread_run); break; } } /* + * Avoid polling file descriptors if there are none + * waiting: + */ + if (TAILQ_EMPTY(&_workq) == 0) { + } + /* * Poll file descriptors only if a new scheduling signal * has occurred or if we have no more runnable threads. */ - if (((current_tick = _sched_ticks) != last_tick) || + else if (((current_tick = _sched_ticks) != last_tick) || ((_thread_run->state != PS_RUNNING) && (PTHREAD_PRIOQ_FIRST() == NULL))) { /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Poll file descriptors to update the state of threads - * waiting on file I/O where data may be available: + * waiting on file I/O where data may be available: */ thread_kern_poll(0); /* Protect the scheduling queues: */ _queue_signals = 1; } last_tick = current_tick; /* * Wake up threads that have timedout. 
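One pre-existing wart worth flagging in the rollover handling above (it appears twice in this function): `last_inactive =+ UINT_MAX + 1' is an assignment, not `+=', and since UINT_MAX + 1 wraps to zero in unsigned int arithmetic the statement simply zeroes the field rather than adding one full wrap of the tick counter. The presumable intent, exact where long is 64 bits:

        /* Account for a rollover of the unsigned tick counter: */
        if (_thread_run->last_inactive < _thread_run->last_active)
                _thread_run->last_inactive += (long)UINT_MAX + 1;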
This has to be * done after polling in case a thread does a poll or * select with zero time. */ PTHREAD_WAITQ_SETACTIVE(); while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) && (pthread->wakeup_time.tv_sec != -1) && (((pthread->wakeup_time.tv_sec == 0) && (pthread->wakeup_time.tv_nsec == 0)) || (pthread->wakeup_time.tv_sec < ts.tv_sec) || ((pthread->wakeup_time.tv_sec == ts.tv_sec) && (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) { switch (pthread->state) { case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Return zero file descriptors ready: */ pthread->data.poll_data->nfds = 0; /* fall through */ default: /* * Remove this thread from the waiting queue * (and work queue if necessary) and place it * in the ready queue. */ PTHREAD_WAITQ_CLEARACTIVE(); if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); break; } /* * Flag the timeout in the thread structure: */ pthread->timeout = 1; } PTHREAD_WAITQ_CLEARACTIVE(); /* * Check to see if the current thread needs to be added * to the priority queue: */ if (add_to_prioq != 0) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ current_tick = _sched_ticks; _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive =+ UINT_MAX + 1; } if ((_thread_run->slice_usec != -1) && (_thread_run->attr.sched_policy != SCHED_FIFO)) { /* * Accumulate the number of microseconds for * which the current thread has run: */ _thread_run->slice_usec += (_thread_run->last_inactive - _thread_run->last_active) * (long)_clock_res_usec; /* Check for time quantum exceeded: */ if (_thread_run->slice_usec > TIMESLICE_USEC) _thread_run->slice_usec = -1; } if (_thread_run->slice_usec == -1) { /* * The thread exceeded its time * quantum or it yielded the CPU; * place it at the tail of the * queue for its priority. */ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run); } else { /* * The thread hasn't exceeded its * interval. Place it at the head * of the queue for its priority. */ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run); } } /* * Get the highest priority thread in the ready queue. */ pthread_h = PTHREAD_PRIOQ_FIRST(); /* Check if there are no threads ready to run: */ if (pthread_h == NULL) { /* * Lock the pthread kernel by changing the pointer to * the running thread to point to the global kernel - * thread structure: + * thread structure: */ _thread_run = &_thread_kern_thread; DBG_MSG("No runnable threads, using kernel thread %p\n", _thread_run); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * There are no threads ready to run, so wait until - * something happens that changes this condition: + * something happens that changes this condition: */ thread_kern_poll(1); /* * This process' usage will likely be very small * while waiting in a poll. Since the scheduling * clock is based on the profiling timer, it is * unlikely that the profiling timer will fire * and update the time of day. To account for this, * get the time of day after polling with a timeout. 
*/ gettimeofday((struct timeval *) &_sched_tod, NULL); /* Check once more for a runnable thread: */ _queue_signals = 1; pthread_h = PTHREAD_PRIOQ_FIRST(); _queue_signals = 0; } if (pthread_h != NULL) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread_h); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Check for signals queued while the scheduling * queues were protected: */ while (_sigq_check_reqd != 0) { /* Clear before handling queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues again: */ _queue_signals = 1; dequeue_signals(); /* * Check for a higher priority thread that * became runnable due to signal handling. */ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) && (pthread->active_priority > pthread_h->active_priority)) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * Insert the lower priority thread * at the head of its priority list: */ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h); /* There's a new thread in town: */ pthread_h = pthread; } /* Unprotect the scheduling queues: */ _queue_signals = 0; } /* Make the selected thread the current thread: */ _thread_run = pthread_h; /* * Save the current time as the time that the thread - * became active: + * became active: */ current_tick = _sched_ticks; _thread_run->last_active = (long) current_tick; /* * Check if this thread is running for the first time * or running again after using its full time slice - * allocation: + * allocation: */ if (_thread_run->slice_usec == -1) { /* Reset the accumulated time slice period: */ _thread_run->slice_usec = 0; } /* * If we had a context switch, run any * installed switch hooks. */ if ((_sched_switch_hook != NULL) && (_last_user_thread != _thread_run)) { thread_run_switch_hook(_last_user_thread, _thread_run); } /* * Continue the thread at its current frame: */ - psf = _thread_run->curframe; - switch(psf->ctxtype) { + switch(_thread_run->ctxtype) { case CTX_JB_NOSIG: - ___longjmp(psf->ctx.jb, psf->longjmp_val); + ___longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_JB: - __longjmp(psf->ctx.jb, psf->longjmp_val); + __longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_SJB: - __siglongjmp(psf->ctx.sigjb, psf->longjmp_val); + __siglongjmp(_thread_run->ctx.sigjb, + _thread_run->longjmp_val); break; case CTX_UC: /* XXX - Restore FP regsisters? */ - FP_RESTORE_UC(&psf->ctx.uc); + FP_RESTORE_UC(&_thread_run->ctx.uc); /* * Do a sigreturn to restart the thread that - * was interrupted by a signal: + * was interrupted by a signal: */ _thread_kern_in_sched = 0; #if NOT_YET - _setcontext(&psf->ctx.uc); + _setcontext(&_thread_run->ctx.uc); #else /* * Ensure the process signal mask is set * correctly: */ - psf->ctx.uc.uc_sigmask = _process_sigmask; - _thread_sys_sigreturn(&psf->ctx.uc); + _thread_run->ctx.uc.uc_sigmask = + _process_sigmask; + _thread_sys_sigreturn(&_thread_run->ctx.uc); #endif break; } /* This point should not be reached. */ PANIC("Thread has returned from sigreturn or longjmp"); } } /* There are no more threads, so exit this process: */ exit(0); } void _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and is placed into the proper queue. 
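With this commit the saved-context bookkeeping (ctxtype, longjmp_val, the ctx union) lives directly in the thread structure instead of a per-frame array, and the resume path above just dispatches on how the state was last saved. A toy of that dispatch using only the portable jump interfaces; the CTX_UC/sigreturn leg is omitted because it has no portable equivalent:

#include <setjmp.h>

enum ctxtype { CTX_JB, CTX_SJB };

struct toy_ctx {
        enum ctxtype    type;   /* how the state was saved */
        int             val;    /* value handed to *longjmp() */
        jmp_buf         jb;
        sigjmp_buf      sjb;
};

/* Resume a context with the primitive that matches its save: */
static void
resume(struct toy_ctx *c)
{
        switch (c->type) {
        case CTX_JB:
                longjmp(c->jb, c->val);
                /* NOTREACHED */
        case CTX_SJB:
                siglongjmp(c->sjb, c->val);
                /* NOTREACHED */
        }
}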
*/ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and it is placed into the proper * queue(s). */ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; _SPINUNLOCK(lock); /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } static void thread_kern_poll(int wait_reqd) { int count = 0; int i, found; int kern_pipe_added = 0; int nfds = 0; int timeout_ms = 0; struct pthread *pthread; struct timespec ts; struct timeval tv; /* Check if the caller wants to wait: */ if (wait_reqd == 0) { timeout_ms = 0; } else { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); _queue_signals = 1; pthread = TAILQ_FIRST(&_waitingq); _queue_signals = 0; if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) { /* * Either there are no threads in the waiting queue, * or there are no threads that can timeout. */ timeout_ms = INFTIM; } else { /* * Calculate the time left for the next thread to * timeout: */ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) * 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) / 1000000); /* * Don't allow negative timeouts: */ if (timeout_ms < 0) timeout_ms = 0; } } /* Protect the scheduling queues: */ _queue_signals = 1; /* * Check to see if the signal queue needs to be walked to look * for threads awoken by a signal while in the scheduler. */ if (_sigq_check_reqd != 0) { /* Reset flag before handling queued signals: */ _sigq_check_reqd = 0; dequeue_signals(); } /* * Check for a thread that became runnable due to a signal: */ if (PTHREAD_PRIOQ_FIRST() != NULL) { /* * Since there is at least one runnable thread, * disable the wait. */ timeout_ms = 0; } /* * Form the poll table: */ nfds = 0; if (timeout_ms != 0) { /* Add the kernel pipe to the poll table: */ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0]; _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].revents = 0; nfds++; kern_pipe_added = 1; } PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. */ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* One less thread in a spinblock state: */ _spinblock_count--; /* * Since there is at least one runnable * thread, disable the wait. 
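The timeout_ms computation above is the usual timespec-to-poll(2) conversion: milliseconds until the earliest wakeup, clamped so a deadline already in the past polls without blocking, and INFTIM when nothing can time out. The same arithmetic in isolation:

#include <poll.h>
#include <time.h>

/* Milliseconds until `wakeup', for poll(2); tv_sec == -1 means never. */
static int
timeout_for_poll(const struct timespec *wakeup, const struct timespec *now)
{
        int ms;

        if (wakeup->tv_sec == -1)
                return (INFTIM);        /* wait indefinitely */
        ms = (wakeup->tv_sec - now->tv_sec) * 1000 +
            (wakeup->tv_nsec - now->tv_nsec) / 1000000;
        return (ms < 0 ? 0 : ms);       /* don't allow negative timeouts */
}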
*/ timeout_ms = 0; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor write wait: */ case PS_FDW_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLWRNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Limit number of polled files to table size: */ if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { for (i = 0; i < pthread->data.poll_data->nfds; i++) { _thread_pfd_table[nfds + i].fd = pthread->data.poll_data->fds[i].fd; _thread_pfd_table[nfds + i].events = pthread->data.poll_data->fds[i].events; } nfds += pthread->data.poll_data->nfds; } break; /* Other states do not depend on file I/O. */ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); /* * Wait for a file descriptor to be ready for read, write, or - * an exception, or a timeout to occur: + * an exception, or a timeout to occur: */ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms); if (kern_pipe_added != 0) /* * Remove the pthread kernel pipe file descriptor - * from the pollfd table: + * from the pollfd table: */ nfds = 1; else nfds = 0; /* * Check if it is possible that there are bytes in the kernel * read pipe waiting to be read: */ if (count < 0 || ((kern_pipe_added != 0) && (_thread_pfd_table[0].revents & POLLRDNORM))) { /* * If the kernel read pipe was included in the - * count: + * count: */ if (count > 0) { /* Decrement the count of file descriptors: */ count--; } if (_sigq_check_reqd != 0) { /* Reset flag before handling signals: */ _sigq_check_reqd = 0; dequeue_signals(); } } /* * Check if any file descriptors are ready: */ if (count > 0) { /* * Enter a loop to look for threads waiting on file * descriptors that are flagged as available by the - * _poll syscall: + * _poll syscall: */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. 
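Each file-descriptor waiter contributes one slot to the shared pollfd table above, and after the poll that same slot's revents decides whether the thread wakes. The per-slot logic, reduced to a single descriptor (names are illustrative):

#include <poll.h>

/* Would a PS_FDR_WAIT thread sleeping on `fd' become runnable? */
static int
read_waiter_ready(int fd, int timeout_ms)
{
        struct pollfd pfd;

        pfd.fd = fd;
        pfd.events = POLLRDNORM;        /* read wait; writers use POLLWRNORM */
        pfd.revents = 0;
        return (poll(&pfd, 1, timeout_ms) > 0 &&
            (pfd.revents & POLLRDNORM) != 0);
}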
*/ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLRDNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor write wait: */ case PS_FDW_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLWRNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { /* * Enter a loop looking for I/O * readiness: */ found = 0; for (i = 0; i < pthread->data.poll_data->nfds; i++) { if (_thread_pfd_table[nfds + i].revents != 0) { pthread->data.poll_data->fds[i].revents = _thread_pfd_table[nfds + i].revents; found++; } } /* Increment before destroying: */ nfds += pthread->data.poll_data->nfds; if (found != 0) { pthread->data.poll_data->nfds = found; PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } } else nfds += pthread->data.poll_data->nfds; break; /* Other states do not depend on file I/O. */ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); } else if (_spinblock_count != 0) { /* * Enter a loop to look for threads waiting on a spinlock * that is now available. */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { if (pthread->state == PS_SPINBLOCK) { /* * If the lock is available, let the thread run. 
*/ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } } } PTHREAD_WAITQ_CLEARACTIVE(); } /* Unprotect the scheduling queues: */ _queue_signals = 0; while (_sigq_check_reqd != 0) { /* Handle queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues: */ _queue_signals = 1; dequeue_signals(); /* Unprotect the scheduling queues: */ _queue_signals = 0; } } void _thread_kern_set_timeout(const struct timespec * timeout) { struct timespec current_time; struct timeval tv; /* Reset the timeout flag for the running thread: */ _thread_run->timeout = 0; /* Check if the thread is to wait forever: */ if (timeout == NULL) { /* * Set the wakeup time to something that can be recognised as - * different to an actual time of day: + * different to an actual time of day: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; } /* Check if no waiting is required: */ else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) { /* Set the wake up time to 'immediately': */ _thread_run->wakeup_time.tv_sec = 0; _thread_run->wakeup_time.tv_nsec = 0; } else { /* Get the current time: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &current_time); /* Calculate the time for the current thread to wake up: */ _thread_run->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec; _thread_run->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec; /* Check if the nanosecond field needs to wrap: */ if (_thread_run->wakeup_time.tv_nsec >= 1000000000) { /* Wrap the nanosecond field: */ _thread_run->wakeup_time.tv_sec += 1; _thread_run->wakeup_time.tv_nsec -= 1000000000; } } } void _thread_kern_sig_defer(void) { /* Allow signal deferral to be recursive. */ _thread_run->sig_defer_count++; } void _thread_kern_sig_undefer(void) { /* * Perform checks to yield only if we are about to undefer * signals. */ if (_thread_run->sig_defer_count > 1) { /* Decrement the signal deferral count. */ _thread_run->sig_defer_count--; } else if (_thread_run->sig_defer_count == 1) { /* Reenable signals: */ _thread_run->sig_defer_count = 0; /* * Check if there are queued signals: */ if (_sigq_check_reqd != 0) _thread_kern_sched(NULL); - /* + /* * Check for asynchronous cancellation before delivering any * pending signals: */ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); /* * If there are pending signals or this thread has * to yield the CPU, call the kernel scheduler: * * XXX - Come back and revisit the pending signal problem */ if ((_thread_run->yield_on_sig_undefer != 0) || SIGNOTEMPTY(_thread_run->sigpend)) { _thread_run->yield_on_sig_undefer = 0; _thread_kern_sched(NULL); } } } static void dequeue_signals(void) { char bufr[128]; int num; /* - * Enter a loop to clear the pthread kernel pipe: + * Enter a loop to clear the pthread kernel pipe: */ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) { } if ((num < 0) && (errno != EAGAIN)) { /* * The only error we should expect is if there is * no data to read.
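dequeue_signals() above drains the non-blocking kernel pipe before processing queued signals; its read loop retries on EINTR and treats EAGAIN (nothing left to read) as success. The same idiom in isolation:

#include <errno.h>
#include <unistd.h>

/* Drain a non-blocking pipe; returns -1 only on an unexpected error. */
static int
drain_pipe(int fd)
{
        char bufr[128];
        ssize_t num;

        while ((num = read(fd, bufr, sizeof(bufr))) > 0 ||
            (num == -1 && errno == EINTR))
                ;       /* discard the wakeup bytes */
        return ((num < 0 && errno != EAGAIN) ? -1 : 0);
}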
*/ PANIC("Unable to read from thread kernel pipe"); } /* Handle any pending signals: */ _thread_sig_handle_pending(); } static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in) { pthread_t tid_out = thread_out; pthread_t tid_in = thread_in; if ((tid_out != NULL) && (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_out = NULL; if ((tid_in != NULL) && (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_in = NULL; if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) { /* Run the scheduler switch hook: */ _sched_switch_hook(tid_out, tid_in); } } #endif Index: head/lib/libkse/thread/thr_mutex.c =================================================================== --- head/lib/libkse/thread/thr_mutex.c (revision 68515) +++ head/lib/libkse/thread/thr_mutex.c (revision 68516) @@ -1,1467 +1,1480 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Reset the mutex flags: */ pmutex->m_flags = 0; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. 
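In pthread_mutex_init() above, only PTHREAD_MUTEX_RECURSIVE carries extra state (the recursion count); the error-checking and normal types need none. For reference, a caller requests the recursive type through the attribute object. A minimal usage sketch with the standard attribute interfaces (libc_r's attribute plumbing of this era may differ in detail):

#include <pthread.h>

/* Create a mutex of the SUSv2 recursive type handled above. */
static int
make_recursive_mutex(pthread_mutex_t *m)
{
        pthread_mutexattr_t attr;
        int ret;

        if ((ret = pthread_mutexattr_init(&attr)) != 0)
                return (ret);
        ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        if (ret == 0)
                ret = pthread_mutex_init(m, &attr);
        pthread_mutexattr_destroy(&attr);
        return (ret);
}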
*/ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } int pthread_mutex_trylock(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. 
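The trylock cases above never block: an unowned mutex is taken, a self-owned one goes through mutex_self_trylock() (EBUSY for the non-recursive types), and anything else fails immediately with EBUSY. Typical caller-side handling:

#include <errno.h>
#include <pthread.h>

/* Returns 1 if the work was done, 0 if the lock was busy, -1 on error. */
static int
try_enter(pthread_mutex_t *m)
{
        int ret;

        if ((ret = pthread_mutex_trylock(m)) == EBUSY)
                return (0);     /* owned elsewhere; don't wait */
        if (ret != 0)
                return (-1);
        /* ... critical section ... */
        pthread_mutex_unlock(m);
        return (1);
}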
*/ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } int pthread_mutex_lock(pthread_mutex_t * mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) - ret = EINVAL; + return (EINVAL); /* * If the mutex is statically initialized, perform the dynamic * initialization: */ - else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + if ((*mutex == NULL) && + ((ret = init_static(mutex)) != 0)) + return (ret); + + /* Reset the interrupted flag: */ + _thread_run->interrupted = 0; + + /* + * Enter a loop waiting to become the mutex owner. We need a + * loop in case the waiting thread is interrupted by a signal + * to execute a signal handler. It is not (currently) possible + * to remain in the waiting queue while running a handler. + * Instead, the thread is interrupted and backed out of the + * waiting queue prior to executing the signal handler. + */ + while (((*mutex)->m_owner != _thread_run) && (ret == 0) && + (_thread_run->interrupted == 0)) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } - /* Reset the interrupted flag: */ - _thread_run->interrupted = 0; - /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; if (_thread_run->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* Clear any previous error: */ _thread_run->error = 0; /* * Unlock 
the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = _thread_run->error; _thread_run->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (_thread_run->interrupted != 0) mutex_queue_remove(*mutex, _thread_run); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); - - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); } + if (_thread_run->interrupted != 0 && + _thread_run->continuation != NULL) + _thread_run->continuation((void *) _thread_run); + /* Return the completion status: */ return (ret); } int pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? 
EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. */ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. 
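Unlocking a priority mutex above always re-derives the owner's active priority as the maximum of its base priority and whatever it still inherits, so a base-priority change made while the mutex was held is not lost. The invariant in isolation (toy structure, not the library's pthread layout):

struct toy_thread {
        int     base_priority;          /* from the scheduling parameters */
        int     inherited_priority;     /* from owned PI/ceiling mutexes */
        int     active_priority;        /* what the scheduler actually uses */
};

static void
recompute_active(struct toy_thread *t)
{
        t->active_priority = (t->inherited_priority > t->base_priority) ?
            t->inherited_priority : t->base_priority;
}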
*/ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is a recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the thread's inherited priority and * recompute the active priority (being careful * not to override changes in the thread's base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the thread's queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority has * been lowered and/or this thread's * priority has been raised subsequent * to this thread being queued on the * waiting list. */ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the thread's list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning thread's inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritance mutex.
A change in a thread's base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorities of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this thread's change * in priority. This has the side effect of changing * the thread's active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritance mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when the priority of a thread already in the mutex waiting queue * changes. */ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning thread's * active priority(*). * * (*) Because the owning thread's current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the thread's saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the thread's priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the thread's priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the thread's new priority has affected the * priority of the mutex.
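/*
 * [Illustrative sketch, not part of this commit.] The recomputation
 * rule used by mutex_priority_adjust() can be shown in isolation.
 * The "toy" names below are simplified stand-ins for the real
 * pthread/pthread_mutex fields; only the max-of-three rule is taken
 * from the code above.
 */
#include <stdio.h>

#define TOY_MAX(a, b) ((a) > (b) ? (a) : (b))

struct toy_mutex {
	int m_prio;		/* current priority of the mutex */
	int m_saved_prio;	/* owner's inherited prio before locking */
};

/* Mutex prio = max(head waiter's active prio, owner's recalculated prio): */
static int
toy_mutex_prio(const struct toy_mutex *m, int head_waiter_prio,
    int owner_base_prio)
{
	return (TOY_MAX(head_waiter_prio,
	    TOY_MAX(m->m_saved_prio, owner_base_prio)));
}

int
main(void)
{
	struct toy_mutex m = { 10, 0 };

	/* A waiter with active priority 20 pulls the mutex up to 20: */
	printf("new mutex prio: %d\n", toy_mutex_prio(&m, 20, 15));
	return (0);
}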
*/ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. */ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning thread's active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritance * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritance * mutexes. This might be optimized by only placing * priority inheritance mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owner's saved (inherited) priority to * reflect the priority of the previous mutex. */ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the thread's inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritance mutexes.
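/*
 * [Illustrative sketch, not part of this commit.] The head-vs-tail
 * requeueing rule described in the comment above, reduced to a
 * standalone program.  The prioq_* functions are stubs standing in
 * for the PTHREAD_PRIOQ_* macros; toy_thread stands in for struct
 * pthread.
 */
#include <stdio.h>

struct toy_thread {
	int active_priority;
	int priority_mutex_count;
};

static void prioq_remove(struct toy_thread *t) { (void)t; }
static void prioq_insert_head(struct toy_thread *t)
{ printf("head, prio %d\n", t->active_priority); }
static void prioq_insert_tail(struct toy_thread *t)
{ printf("tail, prio %d\n", t->active_priority); }

static void
requeue_for_new_priority(struct toy_thread *t, int active_prio)
{
	prioq_remove(t);
	/*
	 * A thread whose priority is being lowered while it owns
	 * priority mutexes goes to the head of its new priority list;
	 * every other case goes to the tail:
	 */
	if ((active_prio < t->active_priority) &&
	    (t->priority_mutex_count > 0)) {
		t->active_priority = active_prio;
		prioq_insert_head(t);
	} else {
		t->active_priority = active_prio;
		prioq_insert_tail(t);
	}
}

int
main(void)
{
	struct toy_thread t = { 20, 1 };

	requeue_for_new_priority(&t, 10);	/* prints "head, prio 10" */
	return (0);
}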
*/ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. */ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - if (pthread->state == PS_MUTEX_WAIT) { + if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ - mutex->m_owner->data.mutex = NULL; + pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. */ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } #endif Index: head/lib/libkse/thread/thr_private.h =================================================================== --- head/lib/libkse/thread/thr_private.h (revision 68515) +++ head/lib/libkse/thread/thr_private.h (revision 68516) @@ -1,1450 +1,1457 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. */ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (long)(ra); \ (jb)[0]._jb[R_RA + 4] = 0; \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. 
*/ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ _thread_sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ _thread_sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). */ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritance and protection. * * m_prio - For priority inheritance, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owner's inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; + int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ - 0, _SPINLOCK_INITIALIZER } + 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 /* * Additional state for a thread suspended with pthread_suspend_np(). */ enum pthread_susp { SUSP_NO, /* Not suspended. */ SUSP_YES, /* Suspended. */ SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ SUSP_COND_WAIT /* Suspended, still in a condition queue.
*/ }; /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of red zone at the end of each stack. In actuality, this "red zone" is * merely an unmapped region, except in the case of the initial stack. Since * mmap() makes it possible to specify the maximum growth of a MAP_STACK region, * an unmapped gap between thread stacks achieves the same effect as explicitly * mapped red zones. */ #define PTHREAD_STACK_GUARD PAGE_SIZE /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 +/* Size of the scheduler stack: */ +#define SCHED_STACK_SIZE PAGE_SIZE + /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_key { spinlock_t lock; volatile int allocated; volatile int count; void (*destructor) (); }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. 
*/ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); +struct pthread_signal_frame; + struct pthread_state_data { - int psd_interrupted; + struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; - enum pthread_state psd_state; - int psd_flags; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; + enum pthread_state psd_state; + int psd_flags; + int psd_interrupted; + int psd_longjmp_val; + int psd_sigmask_seqno; + int psd_signo; + int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; /* * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(), * but they may also be sigjmp_buf and ucontext_t. When a thread is * interrupted by a signal, its context is saved as a ucontext_t. An * application is also free to use [_]longjmp()/[_]siglongjmp() to jump * between contexts within the same thread. Future support will also * include setcontext()/getcontext(). * * Define an enumerated type that can identify the 4 different context * types. */ typedef enum { CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */ CTX_JB, /* context is jmp_buf (with saved sigset) */ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */ CTX_UC /* context is ucontext_t (with saved sigset) */ } thread_context_t; /* * There are 2 basic contexts that a frame may contain at any * one time: * * o ctx - The context that the thread should return to after normal * completion of the signal handler. * o sig_jb - The context just before the signal handler is invoked. * Attempts at abnormal returns from user supplied signal handlers * will return back to the signal context to perform any necessary * cleanup. */ struct pthread_signal_frame { /* * This stores the thread's state before the signal. */ struct pthread_state_data saved_state; - /* Beginning (bottom) of threads stack frame for this signal. */ - unsigned long stackp; - /* * Thread's return context; ctxtype identifies the type of context. * For signal frame 0, these point to the context storage area * within the pthread structure. When handling signals (frame > 0), * these point to a context storage area that is allocated off the * thread's stack. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; - - /* Threads "jump out of signal handler" destination frame. */ - int dst_frame; - - /* - * Used to return back to the signal handling frame in case - * the application tries to change contexts from the handler.
- */ - jmp_buf *sig_jb; - int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ + ucontext_t uc; + siginfo_t siginfo; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* - * Used for tracking delivery of nested signal handlers. - * Signal frame 0 is used for normal context (when no - * signal handlers are active for the thread). Frame - * 1 is used as the context for the first signal, and - * frames 2 .. NSIG-1 are used when additional signals - * arrive interrupting already active signal handlers. + * Thread's return context; ctxtype identifies the type of context. */ - struct pthread_signal_frame *sigframes[NSIG]; - struct pthread_signal_frame sigframe0; + union { + jmp_buf jb; + sigjmp_buf sigjb; + ucontext_t uc; + } ctx; + thread_context_t ctxtype; + int longjmp_val; + + /* + * Used for tracking delivery of signal handlers. + */ struct pthread_signal_frame *curframe; - int sigframe_count; - int sigframe_done; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; enum pthread_susp suspended; thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; + int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* Join queue head and link for waiting threads: */ TAILQ_HEAD(join_head, pthread) join_queue; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for another thread to terminate * (the join queue above) * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex, condition variable, or join queue when it is * not being called from a signal handler. If a thread is a * member of one of these queues when a signal handler is invoked, * it must remain in the queue.
For this reason, the links for * these queues must not be (re)used for other queues. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. */ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link */ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ #define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */ #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ) /* * Base priority is the user settable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritance or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the thread's base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; const void **specific_data; int specific_data_count; /* Cleanup handlers link list. */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* Spare thread stack. */ struct stack { SLIST_ENTRY(stack) qe; /* Queue entry for this stack. */ }; /* * Global variables for the uthread kernel.
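/*
 * [Illustrative sketch, not part of this commit.] Why the thread
 * structure carries separate pqe/sqe/qe links: with <sys/queue.h>,
 * one element can sit on several tail queues at once only if each
 * queue uses its own TAILQ_ENTRY field.  A minimal demonstration:
 */
#include <sys/queue.h>
#include <stdio.h>

struct node {
	int id;
	TAILQ_ENTRY(node) pqe;	/* scheduling queue link */
	TAILQ_ENTRY(node) sqe;	/* synchronization queue link */
};
TAILQ_HEAD(nodeq, node);

int
main(void)
{
	struct nodeq readyq = TAILQ_HEAD_INITIALIZER(readyq);
	struct nodeq mutexq = TAILQ_HEAD_INITIALIZER(mutexq);
	struct node n = { 1 };

	/* The same node is linked into both queues simultaneously: */
	TAILQ_INSERT_TAIL(&readyq, &n, pqe);
	TAILQ_INSERT_TAIL(&mutexq, &n, sqe);
	printf("%d %d\n", TAILQ_FIRST(&readyq)->id, TAILQ_FIRST(&mutexq)->id);
	return (0);
}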
*/ /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). */ SCLASS struct pthread * volatile _thread_single #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. */ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. */ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. 
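/*
 * [Illustrative sketch, not part of this commit.] The SCLASS idiom
 * used by the declarations above and below: every file including the
 * header sees extern declarations, while the one file that defines
 * GLOBAL_PTHREAD_PRIVATE before inclusion emits the actual
 * definitions with their initializers.  Reduced to a toy header:
 */
/* toy_private.h */
#ifdef GLOBAL_TOY_PRIVATE
#define SCLASS			/* defining TU: emit storage */
#else
#define SCLASS extern		/* all other TUs: declaration only */
#endif

SCLASS int toy_kern_in_sched	/* one definition, many declarations */
#ifdef GLOBAL_TOY_PRIVATE
= 0
#endif
;

#undef SCLASS
/*
 * Exactly one .c file does "#define GLOBAL_TOY_PRIVATE" before
 * including toy_private.h; every other file includes it plainly.
 */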
*/ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; -SCLASS sigset_t _process_sigmask; +SCLASS sigset_t _process_sigmask +#ifdef GLOBAL_PTHREAD_PRIVATE += { {0, 0, 0, 0} } +#endif +; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. */ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Spare stack queue. Stacks of default size are cached in order to reduce * thread creation time. Spare stacks are used in LIFO order to increase cache * locality. */ SCLASS SLIST_HEAD(, stack) _stackq; /* * Base address of next unallocated default-size {stack, red zone}. Stacks are * allocated contiguously, starting below the bottom of the main stack. When a * new stack is created, a red zone is created (actually, the red zone is simply * left unmapped) below the bottom of the stack, such that the stack will not be * able to grow all the way to the top of the next stack. This isn't * fool-proof. It is possible for a stack to grow by a large amount, such that * it grows into the next stack, and as long as the memory within the red zone * is never accessed, nothing will prevent one thread stack from trouncing all * over the next. */ SCLASS void * _next_stack #ifdef GLOBAL_PTHREAD_PRIVATE /* main stack top - main stack size - stack size - (red zone + main stack red zone) */ = (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD) #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
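/*
 * [Illustrative sketch, not part of this commit, using made-up
 * constants.] The _next_stack arithmetic above: default-size thread
 * stacks are carved downward below the initial thread's stack, each
 * separated by an unmapped red-zone page.
 */
#include <stdio.h>

#define TOY_USRSTACK		0xbfc00000UL	/* top of user stack space */
#define TOY_STACK_INITIAL	0x100000UL	/* initial thread: 1 MB */
#define TOY_STACK_DEFAULT	0x10000UL	/* other threads: 64 kB */
#define TOY_STACK_GUARD		0x1000UL	/* one-page red zone */

int
main(void)
{
	/* main stack top - main stack size - stack size - 2 red zones: */
	unsigned long next_stack = TOY_USRSTACK - TOY_STACK_INITIAL -
	    TOY_STACK_DEFAULT - (2 * TOY_STACK_GUARD);
	int i;

	/* Each subsequent stack (plus its red zone) sits below the last: */
	for (i = 0; i < 3; i++) {
		printf("stack %d base: %#lx\n", i, next_stack);
		next_stack -= (TOY_STACK_DEFAULT + TOY_STACK_GUARD);
	}
	return (0);
}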
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); void _flockfile_backout(pthread_t); void _funlock_owned(pthread_t); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); -void _thread_exit_finish(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); -void _thread_kern_sched_frame(int frame); +void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); -int _thread_sigframe_find(pthread_t pthread, void *stackp); +void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *); int _thread_sys_sigpending(sigset_t *); int _thread_sys_sigprocmask(int, const sigset_t *, sigset_t *); int _thread_sys_sigsuspend(const sigset_t *); int _thread_sys_siginterrupt(int, int); int _thread_sys_sigpause(int); int _thread_sys_sigreturn(ucontext_t *); +int _thread_sys_sigaltstack(const struct sigaltstack *, struct sigstack *); int _thread_sys_sigstack(const struct sigstack *, struct sigstack *); int _thread_sys_sigvec(int, struct sigvec *, struct sigvec *); void _thread_sys_psignal(unsigned int, const char *); void (*_thread_sys_signal(int, void 
(*)(int)))(int); /* #include */ #ifdef _SYS_STAT_H_ int _thread_sys_fchmod(int, mode_t); int _thread_sys_fstat(int, struct stat *); int _thread_sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int _thread_sys_fstatfs(int, struct statfs *); #endif int _thread_sys_pipe(int *); /* #include */ #ifdef _SYS_SOCKET_H_ int _thread_sys_accept(int, struct sockaddr *, int *); int _thread_sys_bind(int, const struct sockaddr *, int); int _thread_sys_connect(int, const struct sockaddr *, int); int _thread_sys_getpeername(int, struct sockaddr *, int *); int _thread_sys_getsockname(int, struct sockaddr *, int *); int _thread_sys_getsockopt(int, int, int, void *, int *); int _thread_sys_listen(int, int); int _thread_sys_setsockopt(int, int, int, const void *, int); int _thread_sys_shutdown(int, int); int _thread_sys_socket(int, int, int); int _thread_sys_socketpair(int, int, int, int *); ssize_t _thread_sys_recv(int, void *, size_t, int); ssize_t _thread_sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t _thread_sys_recvmsg(int, struct msghdr *, int); ssize_t _thread_sys_send(int, const void *, size_t, int); ssize_t _thread_sys_sendmsg(int, const struct msghdr *, int); ssize_t _thread_sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _STDIO_H_ FILE *_thread_sys_fdopen(int, const char *); FILE *_thread_sys_fopen(const char *, const char *); FILE *_thread_sys_freopen(const char *, const char *, FILE *); FILE *_thread_sys_popen(const char *, const char *); FILE *_thread_sys_tmpfile(void); char *_thread_sys_ctermid(char *); char *_thread_sys_cuserid(char *); char *_thread_sys_fgetln(FILE *, size_t *); char *_thread_sys_fgets(char *, int, FILE *); char *_thread_sys_gets(char *); char *_thread_sys_tempnam(const char *, const char *); char *_thread_sys_tmpnam(char *); int _thread_sys_fclose(FILE *); int _thread_sys_feof(FILE *); int _thread_sys_ferror(FILE *); int _thread_sys_fflush(FILE *); int _thread_sys_fgetc(FILE *); int _thread_sys_fgetpos(FILE *, fpos_t *); int _thread_sys_fileno(FILE *); int _thread_sys_fprintf(FILE *, const char *, ...); int _thread_sys_fpurge(FILE *); int _thread_sys_fputc(int, FILE *); int _thread_sys_fputs(const char *, FILE *); int _thread_sys_fscanf(FILE *, const char *, ...); int _thread_sys_fseek(FILE *, long, int); int _thread_sys_fsetpos(FILE *, const fpos_t *); int _thread_sys_getc(FILE *); int _thread_sys_getchar(void); int _thread_sys_getw(FILE *); int _thread_sys_pclose(FILE *); int _thread_sys_printf(const char *, ...); int _thread_sys_putc(int, FILE *); int _thread_sys_putchar(int); int _thread_sys_puts(const char *); int _thread_sys_putw(int, FILE *); int _thread_sys_remove(const char *); int _thread_sys_rename (const char *, const char *); int _thread_sys_scanf(const char *, ...); int _thread_sys_setlinebuf(FILE *); int _thread_sys_setvbuf(FILE *, char *, int, size_t); int _thread_sys_snprintf(char *, size_t, const char *, ...); int _thread_sys_sprintf(char *, const char *, ...); int _thread_sys_sscanf(const char *, const char *, ...); int _thread_sys_ungetc(int, FILE *); int _thread_sys_vfprintf(FILE *, const char *, _BSD_VA_LIST_); int _thread_sys_vprintf(const char *, _BSD_VA_LIST_); int _thread_sys_vscanf(const char *, _BSD_VA_LIST_); int _thread_sys_vsnprintf(char *, size_t, const char *, _BSD_VA_LIST_); int _thread_sys_vsprintf(char *, const char *, _BSD_VA_LIST_); int _thread_sys_vsscanf(const char *, const char *, _BSD_VA_LIST_); long _thread_sys_ftell(FILE *); size_t 
_thread_sys_fread(void *, size_t, size_t, FILE *); size_t _thread_sys_fwrite(const void *, size_t, size_t, FILE *); void _thread_sys_clearerr(FILE *); void _thread_sys_perror(const char *); void _thread_sys_rewind(FILE *); void _thread_sys_setbuf(FILE *, char *); void _thread_sys_setbuffer(FILE *, char *, int); #endif /* #include */ #ifdef _UNISTD_H_ char *_thread_sys_ttyname(int); int _thread_sys_close(int); int _thread_sys_dup(int); int _thread_sys_dup2(int, int); int _thread_sys_exect(const char *, char * const *, char * const *); int _thread_sys_execve(const char *, char * const *, char * const *); int _thread_sys_fchdir(int); int _thread_sys_fchown(int, uid_t, gid_t); int _thread_sys_fsync(int); int _thread_sys_ftruncate(int, off_t); int _thread_sys_pause(void); int _thread_sys_pipe(int *); int _thread_sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); long _thread_sys_fpathconf(int, int); off_t _thread_sys_lseek(int, off_t, int); pid_t _thread_sys_fork(void); pid_t _thread_sys_tcgetpgrp(int); ssize_t _thread_sys_read(int, void *, size_t); ssize_t _thread_sys_write(int, const void *, size_t); void _thread_sys__exit(int); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int _thread_sys_creat(const char *, mode_t); int _thread_sys_fcntl(int, int, ...); int _thread_sys_flock(int, int); int _thread_sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int _thread_sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _DIRENT_H_ DIR *___thread_sys_opendir2(const char *, int); DIR *_thread_sys_opendir(const char *); int _thread_sys_alphasort(const void *, const void *); int _thread_sys_scandir(const char *, struct dirent ***, int (*)(struct dirent *), int (*)(const void *, const void *)); int _thread_sys_closedir(DIR *); int _thread_sys_getdirentries(int, char *, int, long *); long _thread_sys_telldir(const DIR *); struct dirent *_thread_sys_readdir(DIR *); void _thread_sys_rewinddir(DIR *); void _thread_sys_seekdir(DIR *, long); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t _thread_sys_readv(int, const struct iovec *, int); ssize_t _thread_sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t _thread_sys_wait(int *); pid_t _thread_sys_waitpid(pid_t, int *, int); pid_t _thread_sys_wait3(int *, int, struct rusage *); pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _SYS_POLL_H_ int _thread_sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int _thread_sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ Index: head/lib/libkse/thread/thr_sig.c =================================================================== --- head/lib/libkse/thread/thr_sig.c (revision 68515) +++ head/lib/libkse/thread/thr_sig.c (revision 68516) @@ -1,1267 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); -static void thread_sigframe_add(pthread_t thread, int sig); -static void thread_sigframe_leave(pthread_t thread, int frame); -static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); +static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { - pthread_t pthread; - int current_frame; + pthread_t pthread, pthread_h; + void *stackp; + int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run); + if (_thread_kern_in_sched != 0) + in_sched = 1; + else { + stackp = (void *)GET_STACK_UC(ucp); + if ((stackp >= _thread_kern_sched_stack) && + (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) + in_sched = 1; + } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; - if (_thread_kern_in_sched != 0) { + if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. 
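/*
 * [Illustrative sketch, not part of this commit.] The new in_sched
 * test above asks whether the interrupted context's stack pointer
 * lies within the dedicated scheduler stack.  The same bounds check
 * in isolation, with toy names and an ordinary array standing in for
 * the scheduler stack:
 */
#include <stdio.h>

#define TOY_SCHED_STACK_SIZE	4096

static char toy_sched_stack[TOY_SCHED_STACK_SIZE];

static int
on_sched_stack(void *stackp)
{
	return (((char *)stackp >= toy_sched_stack) &&
	    ((char *)stackp <= toy_sched_stack + TOY_SCHED_STACK_SIZE));
}

int
main(void)
{
	int local;

	printf("%d %d\n", on_sched_stack(toy_sched_stack + 100),
	    on_sched_stack(&local));	/* prints "1 0" */
	return (0);
}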
*/ else if (_thread_run->sig_defer_count > 0) _thread_run->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(_thread_run, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp - * instead. + * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the - * process: + * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ - else if ((_thread_kern_in_sched != 0) || - (_thread_run->sig_defer_count > 0)) { + else if ((in_sched != 0) || (_thread_run->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { _thread_sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { - /* The signal is not blocked; handle the signal: */ - current_frame = _thread_run->sigframe_count; - /* + * The signal is not blocked; handle the signal. + * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); + pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. * Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. 
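 *
 * The pipe write above is the classic "self-pipe trick": the only
 * async-signal-safe way to hand work from a signal handler to code
 * running outside of signal context is to write a byte and let the
 * normal control flow read it back later. A minimal, self-contained
 * sketch of the same idea, using only standard POSIX calls (queue_pipe
 * and process_signal are illustrative names, not part of this library):
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static int queue_pipe[2];	// set up with pipe() at startup
 *
 *	static void
 *	handler(int sig)
 *	{
 *		char c = (char)sig;	// one byte per signal number
 *
 *		(void)write(queue_pipe[1], &c, 1);	// async-signal-safe
 *	}
 *
 *	// Later, outside signal context, drain the queue:
 *	//	char c;
 *	//	while (read(queue_pipe[0], &c, 1) == 1)
 *	//		process_signal((int)c);	// hypothetical consumer
 *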
*/ thread_sig_savecontext(_thread_run, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ - pthread = PTHREAD_PRIOQ_FIRST(); + pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", - pthread); + pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ - if ((_thread_run->sigframe_count > current_frame) || - ((pthread != NULL) && - (pthread->active_priority > _thread_run->active_priority))) { + if ((pthread == _thread_run) || ((pthread_h != NULL) && + (pthread_h->active_priority > _thread_run->active_priority))) { /* Enter the kernel scheduler: */ - DBG_MSG("Entering scheduler from signal handler\n"); _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { - struct pthread_signal_frame *psf; + memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); - psf = _thread_run->curframe; - - memcpy(&psf->ctx.uc, ucp, sizeof(*ucp)); - /* XXX - Save FP registers too? */ - FP_SAVE_UC(&psf->ctx.uc); + FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ - psf->ctxtype = CTX_UC; + pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) + if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); + /* Unblock this signal to allow further dumps: */ + _thread_sigq[sig - 1].blocked = 0; + } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((_thread_run != &_thread_kern_thread) && !sigismember(&_thread_run->sigmask, sig)) signaled_thread = _thread_run; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags.
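 *
 * The first-preference rule for sigwait described above is what makes
 * a dedicated signal-catching thread work: a thread parked in
 * sigwait() consumes the signal directly, with no handler invocation
 * and no signal frame. A small self-contained sketch against the
 * standard API (sig_thread is an illustrative name):
 *
 *	#include <pthread.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void *
 *	sig_thread(void *arg)
 *	{
 *		sigset_t *set = arg;	// signals of interest, blocked by the caller
 *		int sig;
 *
 *		for (;;) {
 *			if (sigwait(set, &sig) == 0)
 *				printf("consumed signal %d\n", sig);
 *		}
 *		return (NULL);
 *	}
 *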
- */ + */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads - * capable of receiving the signal: + * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh.
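 *
 * Re-asserting O_NONBLOCK after a child may have cleared it is plain
 * fcntl(). The loop below uses the library's cached per-descriptor
 * flags; a generic, self-contained equivalent reads the flags back
 * first (set_nonblocking is an illustrative name):
 *
 *	#include <fcntl.h>
 *
 *	static int
 *	set_nonblocking(int fd)
 *	{
 *		int flags = fcntl(fd, F_GETFL, 0);	// current status flags
 *
 *		if (flags < 0)
 *			return (-1);
 *		return (fcntl(fd, F_SETFL, flags | O_NONBLOCK));
 *	}
 *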
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ _thread_sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { - int restart, frame; - int block_signals = 0; + int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ - case PS_COND_WAIT: case PS_JOIN: + /* Only set the interrupted flag for PS_JOIN: */ + pthread->interrupted = 1; + /* FALLTHROUGH */ + case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); - /* - * To ensure the thread is removed from the fd and file - * queues before any other signal interrupts it, set the - * signal mask to block all signals. As soon as the thread - * is removed from the queue the signal mask will be - * restored. - */ - block_signals = 1; break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { + /* Setup a signal frame and save the current thread's state: */ + thread_sigframe_add(pthread, sig, has_args); + /* - * Save the current state of the thread and add a - * new signal frame. + * Signals are deferred until just before the thread's + * signal handler is invoked: */ - frame = pthread->sigframe_count; - thread_sigframe_save(pthread, pthread->curframe); - thread_sigframe_add(pthread, sig); - pthread->sigframes[frame + 1]->sig_has_args = has_args; - SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask); - if (block_signals != 0) { - /* Save the signal mask and block all signals: */ - pthread->sigframes[frame + 1]->saved_state.psd_sigmask = - pthread->sigmask; - sigfillset(&pthread->sigmask); - } - + pthread->sig_defer_count = 1; + /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue.
*/ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != _thread_run) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == _thread_run) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. 
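 *
 * (On _thread_sig_send above:) from the application's side this path
 * is reached through pthread_kill(), which targets one thread rather
 * than the whole process. A trivial usage sketch (kick_thread is an
 * illustrative name):
 *
 *	#include <pthread.h>
 *	#include <signal.h>
 *
 *	static int
 *	kick_thread(pthread_t target)
 *	{
 *		// Deliver SIGUSR1 to a single thread; the library routes
 *		// it through _thread_sig_send() rather than kill().
 *		return (pthread_kill(target, SIGUSR1));
 *	}
 *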
* * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; - pthread_t thread; - int dead = 0; - int i, sig, has_args; - int frame, dst_frame; + pthread_t thread; thread = _thread_run; /* Get the current frame and state: */ - frame = thread->sigframe_count; - PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler"); psf = thread->curframe; + thread->curframe = NULL; + PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); - /* Check the threads previous state: */ + /* Check the thread's previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. */ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; case PS_FILE_WAIT: _flockfile_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; + case PS_COND_WAIT: + _cond_wait_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_JOIN: + _join_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_MUTEX_WAIT: + _mutex_lock_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + default: break; } } + /* Unblock the signal in case we don't return from the handler: */ + _thread_sigq[psf->signo - 1].blocked = 0; + /* - * Unless the thread exits or longjmps out of the signal handler, - * return to the previous frame: + * Lower the priority before calling the handler in case + * it never returns (longjmps back): */ - dst_frame = frame - 1; + thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* + * Reenable interruptions without checking for the need to + * context switch: + */ + thread->sig_defer_count = 0; + + /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { + DBG_MSG("_thread_sig_wrapper: Calling signal handler for " + "thread 0x%p\n", thread); /* - * The signal jump buffer is allocated off the stack. - * If the signal handler tries to [_][sig]longjmp() or - * setcontext(), our wrapped versions of these routines - * will copy the user supplied jump buffer or context - * to the destination signal frame, set the destination - * signal frame in psf->dst_frame, and _longjmp() back - * to here. + * Dispatch the signal via the custom signal + * handler: */ - jmp_buf jb; - - /* - * Set up the context for abnormal returns out of signal - * handlers. - */ - psf->sig_jb = &jb; - if (_setjmp(jb) == 0) { - DBG_MSG("_thread_sig_wrapper: Entering frame %d, " - "stack 0x%lx\n", frame, GET_STACK_JB(jb)); - /* - * Invalidate the destination frame before calling - * the signal handler.
- */ - psf->dst_frame = -1; - - /* - * Dispatch the signal via the custom signal - * handler: - */ - if (psf->sig_has_args == 0) - (*(sigfunc))(psf->signo, NULL, NULL); - else if ((_thread_sigact[psf->signo - 1].sa_flags & - SA_SIGINFO) != 0) - (*(sigfunc))(psf->signo, - &_thread_sigq[psf->signo - 1].siginfo, - &_thread_sigq[psf->signo - 1].uc); - else - (*(sigfunc))(psf->signo, - (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code, - &_thread_sigq[psf->signo - 1].uc); - } - else { - /* - * The return from _setjmp() should only be non-zero - * when the signal handler wants to xxxlongjmp() or - * setcontext() to a different context, or if the - * thread has exited (via pthread_exit). - */ - /* - * Grab a copy of the destination frame before it - * gets clobbered after unwinding. - */ - dst_frame = psf->dst_frame; - DBG_MSG("Abnormal exit from handler for signal %d, " - "frame %d\n", psf->signo, frame); - - /* Has the thread exited? */ - if ((dead = thread->flags & PTHREAD_EXITING) != 0) - /* When exiting, unwind to frame 0. */ - dst_frame = 0; - else if ((dst_frame < 0) || (dst_frame > frame)) - PANIC("Attempt to unwind to invalid " - "signal frame"); - - /* Unwind to the target frame: */ - for (i = frame; i > dst_frame; i--) { - DBG_MSG("Leaving frame %d, signal %d\n", i, - thread->sigframes[i]->signo); - /* Leave the current signal frame: */ - thread_sigframe_leave(thread, i); - - /* - * Save whatever is needed out of the state - * data; as soon as the frame count is - * is decremented, another signal can arrive - * and corrupt this view of the state data. - */ - sig = thread->sigframes[i]->signo; - has_args = thread->sigframes[i]->sig_has_args; - - /* - * We're done with this signal frame: - */ - thread->curframe = thread->sigframes[i - 1]; - thread->sigframe_count = i - 1; - - /* - * Only unblock the signal if it was a - * process signal as opposed to a signal - * generated by pthread_kill(). - */ - if (has_args != 0) - _thread_sigq[sig - 1].blocked = 0; - } - } + if (psf->sig_has_args == 0) + (*(sigfunc))(psf->signo, NULL, NULL); + else if ((_thread_sigact[psf->signo - 1].sa_flags & + SA_SIGINFO) != 0) + (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); + else + (*(sigfunc))(psf->signo, + (siginfo_t *)psf->siginfo.si_code, &psf->uc); } - /* - * Call the kernel scheduler to schedule the next - * thread. + * Call the kernel scheduler to safely restore the frame and + * schedule the next thread: */ - if (dead == 0) { - /* Restore the threads state: */ - thread_sigframe_restore(thread, thread->sigframes[dst_frame]); - _thread_kern_sched_frame(dst_frame); - } - else { - PTHREAD_ASSERT(dst_frame == 0, - "Invalid signal frame for dead thread"); - - /* Perform any necessary cleanup before exiting. 
*/ - thread_sigframe_leave(thread, 0); - - /* This should never return: */ - _thread_exit_finish(); - PANIC("Return from _thread_exit_finish in signal wrapper"); - } + _thread_kern_sched_frame(psf); } static void -thread_sigframe_add(pthread_t thread, int sig) +thread_sigframe_add(pthread_t thread, int sig, int has_args) { + struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ - switch (thread->curframe->ctxtype) { + switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: - stackp = GET_STACK_JB(thread->curframe->ctx.jb); + stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: - stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb); + stackp = GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: - stackp = GET_STACK_UC(&thread->curframe->ctx.uc); + stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); - /* Set up the new frame: */ - thread->sigframe_count++; - thread->sigframes[thread->sigframe_count] = - (struct pthread_signal_frame *) stackp; - thread->curframe = thread->sigframes[thread->sigframe_count]; - thread->curframe->stackp = stackp; - thread->curframe->ctxtype = CTX_JB_NOSIG; - thread->curframe->longjmp_val = 1; - thread->curframe->signo = sig; + psf = (struct pthread_signal_frame *) stackp; - /* - * Set up the context: - */ - _setjmp(thread->curframe->ctx.jb); - SET_STACK_JB(thread->curframe->ctx.jb, stackp); - SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper); -} + /* Save the current context in the signal frame: */ + thread_sigframe_save(thread, psf); -/* - * Locate the signal frame from the specified stack pointer. - */ -int -_thread_sigframe_find(pthread_t pthread, void *stackp) -{ - int frame; + /* Set handler specific information: */ + psf->sig_has_args = has_args; + psf->signo = sig; + if (has_args) { + /* Copy the signal handler arguments to the signal frame: */ + memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, + sizeof(psf->uc)); + memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, + sizeof(psf->siginfo)); + } + /* Set up the new frame: */ + thread->curframe = psf; + thread->ctxtype = CTX_JB_NOSIG; + thread->longjmp_val = 1; + thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | + PTHREAD_FLAGS_IN_SYNCQ; /* - * Find the destination of the target frame based on the - * given stack pointer. 
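 *
 * The stack arithmetic in thread_sigframe_add() above carves the new
 * frame off the top of the thread's downward-growing stack. The same
 * pattern in isolation (struct frame and carve_frame are illustrative
 * stand-ins for the real types):
 *
 *	#include <stdint.h>
 *
 *	struct frame { int signo; };
 *
 *	static struct frame *
 *	carve_frame(uintptr_t stack_top)
 *	{
 *		uintptr_t sp = stack_top;
 *
 *		sp -= sizeof(double);		// leave a little scratch space
 *		sp &= ~(uintptr_t)0x3;		// round down to word alignment
 *		sp -= sizeof(struct frame);	// room for the frame itself
 *		return ((struct frame *)sp);	// new, lower stack top
 *	}
 *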
+ * Set up the context: */ - for (frame = pthread->sigframe_count; frame >= 0; frame--) { - if (stackp < (void *)pthread->sigframes[frame]->stackp) - break; - } - return (frame); + stackp += sizeof(double); + _setjmp(thread->ctx.jb); + SET_STACK_JB(thread->ctx.jb, stackp); + SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } - + void -thread_sigframe_leave(pthread_t thread, int frame) +_thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { - struct pthread_state_data *psd; - - psd = &thread->sigframes[frame]->saved_state; - + thread->ctxtype = psf->ctxtype; + memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* - * Perform any necessary cleanup for this signal frame: + * Only restore the signal mask if it hasn't been changed + * by the application during invocation of the signal handler: */ - switch (psd->psd_state) { - case PS_DEAD: - case PS_DEADLOCK: - case PS_RUNNING: - case PS_SIGTHREAD: - case PS_STATE_MAX: - case PS_SUSPENDED: - break; - - /* - * Threads in the following states need to be removed - * from queues. - */ - case PS_COND_WAIT: - _cond_wait_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - _fd_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FILE_WAIT: - _flockfile_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_JOIN: - _join_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_MUTEX_WAIT: - _mutex_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - case PS_SLEEP_WAIT: - case PS_SPINBLOCK: - case PS_WAIT_WAIT: - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) { - PTHREAD_WAITQ_REMOVE(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - } - break; - } -} - -static void -thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) -{ - thread->interrupted = psf->saved_state.psd_interrupted; - thread->sigmask = psf->saved_state.psd_sigmask; - thread->state = psf->saved_state.psd_state; - thread->flags = psf->saved_state.psd_flags; + if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) + thread->sigmask = psf->saved_state.psd_sigmask; + thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; + thread->state = psf->saved_state.psd_state; + thread->flags = psf->saved_state.psd_flags; + thread->interrupted = psf->saved_state.psd_interrupted; + thread->longjmp_val = psf->saved_state.psd_longjmp_val; + thread->signo = psf->saved_state.psd_signo; + thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { - psf->saved_state.psd_interrupted = thread->interrupted; + psf->ctxtype = thread->ctxtype; + memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; - psf->saved_state.psd_state = thread->state; - psf->saved_state.psd_flags = thread->flags; - thread->flags &= PTHREAD_FLAGS_PRIVATE | 
PTHREAD_FLAGS_TRACE | - PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | - PTHREAD_FLAGS_IN_JOINQ; + psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; + psf->saved_state.psd_state = thread->state; + psf->saved_state.psd_flags = thread->flags & + (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); + psf->saved_state.psd_interrupted = thread->interrupted; + psf->saved_state.psd_longjmp_val = thread->longjmp_val; + psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; + psf->saved_state.psd_signo = thread->signo; + psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } #endif Index: head/lib/libkse/thread/thr_sigaction.c =================================================================== --- head/lib/libkse/thread/thr_sigaction.c (revision 68515) +++ head/lib/libkse/thread/thr_sigaction.c (revision 68516) @@ -1,111 +1,111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int _sigaction(int sig, const struct sigaction * act, struct sigaction * oact) { int ret = 0; struct sigaction gact; /* Check if the signal number is out of range: */ if (sig < 1 || sig > NSIG) { /* Return an invalid argument: */ errno = EINVAL; ret = -1; } else { /* * Check if the existing signal action structure contents are * to be returned: */ if (oact != NULL) { /* Return the existing signal action contents: */ oact->sa_handler = _thread_sigact[sig - 1].sa_handler; oact->sa_mask = _thread_sigact[sig - 1].sa_mask; oact->sa_flags = _thread_sigact[sig - 1].sa_flags; } /* Check if a signal action was supplied: */ if (act != NULL) { /* Set the new signal handler: */ _thread_sigact[sig - 1].sa_mask = act->sa_mask; _thread_sigact[sig - 1].sa_flags = act->sa_flags; _thread_sigact[sig - 1].sa_handler = act->sa_handler; } /* * Check if the kernel needs to be advised of a change * in signal action: */ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD && sig != SIGINFO) { /* * Ensure the signal handler cannot be interrupted * by other signals. Always request the POSIX signal * handler arguments. */ sigfillset(&gact.sa_mask); - gact.sa_flags = SA_SIGINFO; + gact.sa_flags = SA_SIGINFO | SA_ONSTACK; /* * Check if the signal handler is being set to * the default or ignore handlers: */ if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN) /* Specify the built in handler: */ gact.sa_handler = act->sa_handler; else /* * Specify the thread kernel signal * handler: */ gact.sa_handler = (void (*) ()) _thread_sig_handler; /* Change the signal action in the kernel: */ if (_thread_sys_sigaction(sig,&gact,NULL) != 0) ret = -1; } } /* Return the completion status: */ return (ret); } __strong_reference(_sigaction, sigaction); #endif Index: head/lib/libkse/thread/thr_sigmask.c =================================================================== --- head/lib/libkse/thread/thr_sigmask.c (revision 68515) +++ head/lib/libkse/thread/thr_sigmask.c (revision 68516) @@ -1,102 +1,105 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) { sigset_t sigset; int ret = 0; /* Check if the existing signal process mask is to be returned: */ if (oset != NULL) { /* Return the current mask: */ *oset = _thread_run->sigmask; } /* Check if a new signal set was provided by the caller: */ if (set != NULL) { /* Process according to what to do: */ switch (how) { /* Block signals: */ case SIG_BLOCK: /* Add signals to the existing mask: */ SIGSETOR(_thread_run->sigmask, *set); break; /* Unblock signals: */ case SIG_UNBLOCK: /* Clear signals from the existing mask: */ SIGSETNAND(_thread_run->sigmask, *set); break; /* Set the signal process mask: */ case SIG_SETMASK: /* Set the new mask: */ _thread_run->sigmask = *set; break; /* Trap invalid actions: */ default: /* Return an invalid argument: */ errno = EINVAL; ret = -1; break; } + /* Increment the sequence number: */ + _thread_run->sigmask_seqno++; + /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = _thread_run->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, _thread_run->sigmask); if (SIGNOTEMPTY(sigset)) /* * Call the kernel scheduler which will safely * install a signal frame for the running thread: */ _thread_kern_sched_sig(); } /* Return the completion status: */ return (ret); } #endif Index: head/lib/libpthread/thread/thr_cond.c =================================================================== --- head/lib/libpthread/thread/thr_cond.c (revision 68515) +++ head/lib/libpthread/thread/thr_cond.c (revision 68516) @@ -1,696 +1,755 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); /* Reinitialize a condition variable to defaults. */ int -_cond_reinit(pthread_cond_t * cond) +_cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; + (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int -pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) +pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; + pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int -pthread_cond_destroy(pthread_cond_t * cond) +pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int -pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) +pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (cond == NULL) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ - else if (*cond != NULL || - (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && + (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Wait forever: */ _thread_run->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * the condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); - if (_thread_run->interrupted != 0) { - /* - * Remember that this thread - * was interrupted: - */ - interrupted = 1; + done = (seqno != (*cond)->c_seqno); + if ((_thread_run->flags & + PTHREAD_FLAGS_IN_CONDQ) != 0) { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); } /* + * Save the interrupted flag; locking + * the mutex will destroy it. + */ + interrupted = _thread_run->interrupted; + + /* * Note that even though this thread may have * been canceled, POSIX requires that the mutex * be reacquired prior to cancellation. */ rval = _mutex_cv_lock(mutex); } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { int rval = 0; + int done = 0; int interrupted = 0; + int unlock_mutex = 1; + int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) - rval = EINVAL; + return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization.
*/ - else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { + if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) + return (rval); + + /* + * Enter a loop waiting for a condition signal or broadcast + * to wake up this thread. A loop is needed in case the waiting + * thread is interrupted by a signal to execute a signal handler. + * It is not (currently) possible to remain in the waiting queue + * while running a handler. Instead, the thread is interrupted + * and backed out of the waiting queue prior to executing the + * signal handler. + */ + do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ _thread_run->wakeup_time.tv_sec = abstime->tv_sec; _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; _thread_run->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, _thread_run); - /* Remember the mutex that is being used: */ + /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; + seqno = (*cond)->c_seqno; /* Unlock the mutex: */ - if ((rval = _mutex_cv_unlock(mutex)) != 0) { + if ((unlock_mutex != 0) && + ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* + * Don't unlock the mutex in the event + * this thread has to be requeued in + * the condition variable queue: + */ + unlock_mutex = 0; + + /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); + done = (seqno != (*cond)->c_seqno); + /* - * Check if the wait timedout or was - * interrupted (canceled): + * Check if the wait timed out, was + * interrupted (canceled), or needs to + * be resumed after handling a signal.
*/ if ((_thread_run->timeout == 0) && - (_thread_run->interrupted == 0)) { + (_thread_run->interrupted == 0) && + (done != 0)) { /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); - } else { - /* - * Remember if this thread was - * interrupted: - */ - interrupted = _thread_run->interrupted; - - /* Lock the condition variable structure: */ + /* Lock the CV structure: */ _SPINLOCK(&(*cond)->lock); /* * The wait timed out; remove * the thread from the condition - * variable queue: + * variable queue: */ cond_queue_remove(*cond, _thread_run); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; - /* Unock the condition variable structure: */ + /* Unlock the CV structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return a timeout error: */ - rval = ETIMEDOUT; + if (_thread_run->timeout != 0) + rval = ETIMEDOUT; + /* + * Save the interrupted flag; + * locking the mutex will + * destroy it. + */ + interrupted = _thread_run->interrupted; /* * Lock the mutex and ignore any * errors. Note that even though * this thread may have been * canceled, POSIX requires that * the mutex be reacquired prior * to cancellation. */ (void)_mutex_cv_lock(mutex); } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } - if (interrupted != 0) { - if (_thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); - } - } + if ((interrupted != 0) && (_thread_run->continuation != NULL)) + _thread_run->continuation((void *) _thread_run); + } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization.
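 *
 * The c_seqno increments below (in signal and broadcast) pair with the
 * do-loop in the wait paths above: a waiter treats itself as signaled
 * only if the generation count moved while it slept, so a wakeup
 * forced by a signal handler simply re-enters the wait. The same
 * guard, expressed against the standard API for clarity (struct waiter
 * and wait_for_wakeup are illustrative names):
 *
 *	#include <pthread.h>
 *
 *	struct waiter {
 *		pthread_mutex_t	lock;
 *		pthread_cond_t	cv;
 *		unsigned	seqno;	// bumped once per wakeup event
 *	};
 *
 *	static void
 *	wait_for_wakeup(struct waiter *w)
 *	{
 *		unsigned seqno;
 *
 *		pthread_mutex_lock(&w->lock);
 *		seqno = w->seqno;
 *		while (w->seqno == seqno)	// loop until the count moves
 *			pthread_cond_wait(&w->cv, &w->lock);
 *		pthread_mutex_unlock(&w->lock);
 *	}
 *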
*/ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL) == 0)) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: + /* Increment the sequence number: */ + (*cond)->c_seqno++; + /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* * Unless the thread is currently suspended, * allow it to run. If the thread is suspended, * make a note that the thread isn't in a wait * queue any more. */ if (pthread->state != PS_SUSPENDED) PTHREAD_NEW_STATE(pthread,PS_RUNNING); else pthread->suspended = SUSP_NOWAIT; } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. */ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. 
*/
*/ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } #endif Index: head/lib/libpthread/thread/thr_create.c =================================================================== --- head/lib/libpthread/thread/thr_create.c (revision 68515) +++ head/lib/libpthread/thread/thr_create.c (revision 68516) @@ -1,324 +1,327 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #include "libc_private.h" static u_int64_t next_uniqueid = 1; #define OFF(f) offsetof(struct pthread, f) -#define SIGFRAME_OFF(f) offsetof(struct pthread_signal_frame, f) int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); -int _thread_curframe_offset = OFF(curframe); -int _thread_sigframe_ctx_offset = SIGFRAME_OFF(ctx); -int _thread_sigframe_ctxtype_offset = SIGFRAME_OFF(ctxtype); +int _thread_ctxtype_offset = OFF(ctxtype); +int _thread_ctx_offset = OFF(ctx); #undef OFF -#undef SIGFRAME_OFF int _thread_PS_RUNNING_value = PS_RUNNING; int _thread_PS_DEAD_value = PS_DEAD; int _thread_CTX_JB_NOSIG_value = CTX_JB_NOSIG; int _thread_CTX_JB_value = CTX_JB; int _thread_CTX_SJB_value = CTX_SJB; int _thread_CTX_UC_value = CTX_UC; -int _thread_sigframe_size_value = sizeof(struct pthread_signal_frame); int pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { + struct itimerval itimer; int f_gc = 0; int ret = 0; pthread_t gc_thread; pthread_t new_thread; pthread_attr_t pattr; void *stack; /* * Locking functions in libc are required when there are * threads other than the initial thread. */ __isthreaded = 1; /* Allocate memory for the thread structure: */ if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) { /* Use the default thread attributes: */ pattr = &pthread_attr_default; } else { pattr = *attr; } /* Check if a stack was specified in the thread attributes: */ if ((stack = pattr->stackaddr_attr) != NULL) { } /* Allocate memory for a default-size stack: */ else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) { struct stack *spare_stack; /* Allocate or re-use a default-size stack. */ /* * Use the garbage collector mutex for synchronization * of the spare stack list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) { /* Use the spare stack. */ SLIST_REMOVE_HEAD(&_stackq, qe); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); stack = sizeof(struct stack) + (void *) spare_stack - PTHREAD_STACK_DEFAULT; } else { /* Allocate a new stack. */ stack = _next_stack + PTHREAD_STACK_GUARD; - + /* * Even if stack allocation fails, we don't want * to try to use this location again, so * unconditionally decrement _next_stack. Under * normal operating conditions, the most likely * reason for an mmap() error is a stack * overflow of the adjacent thread stack. */ _next_stack -= (PTHREAD_STACK_DEFAULT + PTHREAD_STACK_GUARD); /* Unlock the garbage collector mutex. */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Stack: */ if (mmap(stack, PTHREAD_STACK_DEFAULT, PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED) { ret = EAGAIN; free(new_thread); } } } /* * The user wants a stack of a particular size. Let's hope they * really know what they want, and simply malloc the stack.
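 *
 * A caller reaches this path with something like the following
 * (hypothetical application code, using only the standard
 * attribute API):
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	pthread_attr_setstacksize(&attr, 256 * 1024);
 *	pthread_create(&tid, &attr, start_routine, arg);
 *
 * i.e. any stacksize_attr other than PTHREAD_STACK_DEFAULT, with
 * no explicit stack address, ends up malloc'd below.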
*/ else if ((stack = (void *) malloc(pattr->stacksize_attr)) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; free(new_thread); } /* Check for errors: */ if (ret != 0) { } else { /* Initialise the thread structure: */ memset(new_thread, 0, sizeof(struct pthread)); new_thread->slice_usec = -1; new_thread->stack = stack; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = PTHREAD_MAGIC; /* Initialise the thread for signals: */ new_thread->sigmask = _thread_run->sigmask; + new_thread->sigmask_seqno = 0; - /* Initialize the first signal frame: */ - new_thread->sigframes[0] = &new_thread->sigframe0; - new_thread->curframe = &new_thread->sigframe0; + /* Initialize the signal frame: */ + new_thread->curframe = NULL; /* Initialise the jump buffer: */ - _setjmp(new_thread->curframe->ctx.jb); + _setjmp(new_thread->ctx.jb); /* * Set up new stack frame so that it looks like it * returned from a longjmp() to the beginning of * _thread_start(). */ - SET_RETURN_ADDR_JB(new_thread->curframe->ctx.jb, - _thread_start); + SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start); /* The stack starts high and builds down: */ - SET_STACK_JB(new_thread->curframe->ctx.jb, + SET_STACK_JB(new_thread->ctx.jb, (long)new_thread->stack + pattr->stacksize_attr - sizeof(double)); /* Initialize the rest of the frame: */ - new_thread->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - new_thread->curframe->stackp = - GET_STACK_JB(new_thread->curframe->ctx.jb); - new_thread->sigframe_count = 0; + new_thread->ctxtype = CTX_JB_NOSIG; /* Copy the thread attributes: */ memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr)); /* * Check if this thread is to inherit the scheduling - * attributes from its parent: + * attributes from its parent: */ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) { /* Copy the scheduling attributes: */ new_thread->base_priority = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.prio = _thread_run->base_priority & ~PTHREAD_SIGNAL_PRIORITY; new_thread->attr.sched_policy = _thread_run->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their - * default values: + * default values: */ new_thread->base_priority = new_thread->attr.prio; } new_thread->active_priority = new_thread->base_priority; new_thread->inherited_priority = 0; /* Initialise the join queue for the new thread: */ TAILQ_INIT(&(new_thread->join_queue)); /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); /* Initialise hooks in the thread structure: */ new_thread->specific_data = NULL; new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->poll_data.nfds = 0; new_thread->poll_data.fds = NULL; new_thread->continuation = NULL; /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* * Initialise the unique id which GDB uses to * track threads. */ new_thread->uniqueid = next_uniqueid++; /* * Check if the garbage collector thread * needs to be started. 
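 *
 * The test below is true only on the first call to
 * pthread_create(): before the new thread is linked in, the
 * initial thread is still the head (and sole member) of
 * _thread_list. That first call is also the point where the
 * scheduling itimer is armed, further below.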
*/ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial); /* Add the thread to the linked list of all threads: */ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle); if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) new_thread->state = PS_SUSPENDED; else { new_thread->state = PS_RUNNING; PTHREAD_PRIOQ_INSERT_TAIL(new_thread); } /* * Undefer and handle pending signals, yielding * if necessary. */ _thread_kern_sig_undefer(); /* Return a pointer to the thread structure: */ (*thread) = new_thread; + if (f_gc != 0) { + /* Install the scheduling timer: */ + itimer.it_interval.tv_sec = 0; + itimer.it_interval.tv_usec = _clock_res_usec; + itimer.it_value = itimer.it_interval; + if (setitimer(_ITIMER_SCHED_TIMER, &itimer, + NULL) != 0) + PANIC("Cannot set interval timer"); + } + /* Schedule the new user thread: */ _thread_kern_sched(NULL); + /* * Start a garbage collector thread * if necessary. */ if (f_gc && pthread_create(&gc_thread,NULL, _thread_gc,NULL) != 0) PANIC("Can't create gc thread"); } } /* Return the status: */ return (ret); } void _thread_start(void) { /* We just left the scheduler via longjmp: */ _thread_kern_in_sched = 0; /* Run the current thread's start routine with argument: */ pthread_exit(_thread_run->start_routine(_thread_run->arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } #endif Index: head/lib/libpthread/thread/thr_detach.c =================================================================== --- head/lib/libpthread/thread/thr_detach.c (revision 68515) +++ head/lib/libpthread/thread/thr_detach.c (revision 68516) @@ -1,83 +1,88 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_detach(pthread_t pthread) { int rval = 0; pthread_t next_thread; /* Check for invalid calling parameters: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) /* Return an invalid argument error: */ rval = EINVAL; /* Check if the thread has not been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Enter a loop to bring all threads off the join queue: */ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&pthread->join_queue, next_thread, sqe); next_thread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* Make the thread runnable: */ - PTHREAD_NEW_STATE(next_thread,PS_RUNNING); + PTHREAD_NEW_STATE(next_thread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + next_thread->error = ESRCH; } /* * Undefer and handle pending signals, yielding if a * scheduling signal occurred while in the critical region. */ _thread_kern_sig_undefer(); } else /* Return an error: */ rval = EINVAL; /* Return the completion status: */ return (rval); } #endif Index: head/lib/libpthread/thread/thr_exit.c =================================================================== --- head/lib/libpthread/thread/thr_exit.c (revision 68515) +++ head/lib/libpthread/thread/thr_exit.c (revision 68516) @@ -1,244 +1,237 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) void __exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); _thread_sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = _thread_sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ _thread_sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ _thread_sys__exit(status); } __strong_reference(__exit, _exit); void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ strcpy(s, "Fatal error '"); strcat(s, string); strcat(s, "' at line ? "); strcat(s, "in file "); strcat(s, fname); strcat(s, " (errno = ?"); strcat(s, ")\n"); /* Write the string to the standard error file descriptor: */ _thread_sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else _exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. */ void _thread_exit_cleanup(void) { /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the application's responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(_thread_run); /* Unlock all owned file locks: */ _funlock_owned(_thread_run); /* Unlock all private mutexes: */ _mutex_unlock_private(_thread_run); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void pthread_exit(void *status) { - int frame; + pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((_thread_run->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor.
POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run); PANIC(msg); } /* Flag this thread as exiting: */ _thread_run->flags |= PTHREAD_EXITING; /* Save the return value: */ _thread_run->ret = status; while (_thread_run->cleanup != NULL) { pthread_cleanup_pop(1); } - if (_thread_run->attr.cleanup_attr != NULL) { _thread_run->attr.cleanup_attr(_thread_run->attr.arg_attr); } /* Check if there is thread specific data: */ if (_thread_run->specific_data != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (_thread_run->poll_data.fds != NULL) { free(_thread_run->poll_data.fds); _thread_run->poll_data.fds = NULL; } - if ((frame = _thread_run->sigframe_count) == 0) - _thread_exit_finish(); - else { - /* - * Jump back and unwind the signal frames to gracefully - * cleanup. - */ - ___longjmp(*_thread_run->sigframes[frame]->sig_jb, 1); - } - - /* This point should not be reached. */ - PANIC("Dead thread has resumed"); -} - -void -_thread_exit_finish(void) -{ - pthread_t pthread; - /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. */ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Check if there are any threads joined to this one: */ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) { /* Remove the thread from the queue: */ TAILQ_REMOVE(&_thread_run->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; /* * Wake the joined thread and let it * detach this thread: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); + + /* + * Set the return value for the woken thread: + */ + if ((_thread_run->attr.flags & PTHREAD_DETACHED) != 0) + pthread->error = ESRCH; + else { + pthread->ret = _thread_run->ret; + pthread->error = 0; + } } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, _thread_run, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); + + /* This point should not be reached. */ + PANIC("Dead thread has resumed"); } #endif Index: head/lib/libpthread/thread/thr_info.c =================================================================== --- head/lib/libpthread/thread/thr_info.c (revision 68515) +++ head/lib/libpthread/thread/thr_info.c (revision 68516) @@ -1,315 +1,290 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" +#ifndef NELEMENTS +#define NELEMENTS(arr) (sizeof(arr) / sizeof(arr[0])) +#endif + +static void dump_thread(int fd, pthread_t pthread, int long_version); + + struct s_thread_info { enum pthread_state state; char *name; }; /* Static variables: */ static const struct s_thread_info thread_info[] = { {PS_RUNNING , "Running"}, {PS_SIGTHREAD , "Waiting on signal thread"}, {PS_MUTEX_WAIT , "Waiting on a mutex"}, {PS_COND_WAIT , "Waiting on a condition variable"}, {PS_FDLR_WAIT , "Waiting for a file read lock"}, {PS_FDLW_WAIT , "Waiting for a file write lock"}, {PS_FDR_WAIT , "Waiting for read"}, {PS_FDW_WAIT , "Waiting for write"}, {PS_FILE_WAIT , "Waiting for FILE lock"}, {PS_POLL_WAIT , "Waiting on poll"}, {PS_SELECT_WAIT , "Waiting on select"}, {PS_SLEEP_WAIT , "Sleeping"}, {PS_WAIT_WAIT , "Waiting process"}, {PS_SIGSUSPEND , "Suspended, waiting for a signal"}, {PS_SIGWAIT , "Waiting for a signal"}, {PS_SPINBLOCK , "Waiting for a spinlock"}, {PS_JOIN , "Waiting to join"}, {PS_SUSPENDED , "Suspended"}, {PS_DEAD , "Dead"}, {PS_DEADLOCK , "Deadlocked"}, {PS_STATE_MAX , "Not a real state!"} }; void _thread_dump_info(void) { char s[512]; int fd; int i; - int j; pthread_t pthread; char tmpfile[128]; pq_list_t *pq_list; - for (i = 0; i < 100000; i++) { + for (i = 0; i < 100000; i++) { snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i", getpid(), i); /* Open the dump file for append and create it if necessary: */ if ((fd = _thread_sys_open(tmpfile, O_RDWR | O_CREAT | O_EXCL, 0666)) < 0) { /* Can't open the dump file. */ if (errno == EEXIST) continue; /* * We only need to continue in case of * EEXIST error. Most other error * codes mean that we will fail every * time.
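 *
 * (O_CREAT | O_EXCL makes the creation atomic: if another dump
 * already claimed this suffix, open() fails with EEXIST and the
 * loop can safely step to the next numeric suffix.)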
*/ return; } else { break; } } if (i == 100000) { /* all 100000 possibilities are in use :( */ return; } else { /* Output a header for active threads: */ strcpy(s, "\n\n=============\nACTIVE THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the global list: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); - - /* Check if this is the running thread: */ - if (pthread == _thread_run) { - /* Output a record for the running thread: */ - strcpy(s, "This is the running thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Check if this is the initial thread: */ - if (pthread == _thread_initial) { - /* Output a record for the initial thread: */ - strcpy(s, "This is the initial thread\n"); - _thread_sys_write(fd, s, strlen(s)); - } - /* Process according to thread state: */ - switch (pthread->state) { - /* File descriptor read lock wait: */ - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - /* Write the lock details: */ - snprintf(s, sizeof(s), "fd %d[%s:%d]", - pthread->data.fd.fd, - pthread->data.fd.fname, - pthread->data.fd.branch); - _thread_sys_write(fd, s, strlen(s)); - snprintf(s, sizeof(s), "owner %pr/%pw\n", - _thread_fd_table[pthread->data.fd.fd]->r_owner, - _thread_fd_table[pthread->data.fd.fd]->w_owner); - _thread_sys_write(fd, s, strlen(s)); - break; - case PS_SIGWAIT: - snprintf(s, sizeof(s), "sigmask (hi)"); - _thread_sys_write(fd, s, strlen(s)); - for (i = _SIG_WORDS - 1; i >= 0; i--) { - snprintf(s, sizeof(s), "%08x\n", - pthread->sigmask.__bits[i]); - _thread_sys_write(fd, s, strlen(s)); - } - snprintf(s, sizeof(s), "(lo)\n"); - _thread_sys_write(fd, s, strlen(s)); - break; - - /* - * Trap other states that are not explicitly - * coded to dump information: - */ - default: - /* Nothing to do here. */ - break; - } + dump_thread(fd, pthread, /*long_version*/ 1); } /* Output a header for ready threads: */ strcpy(s, "\n\n=============\nREADY THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the ready queue: */ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) { TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ?
- "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for waiting threads: */ strcpy(s, "\n\n=============\nWAITING THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_waitingq, pqe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Output a header for threads in the work queue: */ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n"); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report each thread in the waiting queue: */ TAILQ_FOREACH (pthread, &_workq, qe) { - /* Find the state: */ - for (j = 0; j < (sizeof(thread_info) / - sizeof(struct s_thread_info)) - 1; j++) - if (thread_info[j].state == pthread->state) - break; - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", - pthread, (pthread->name == NULL) ? - "":pthread->name, pthread->base_priority, - thread_info[j].name, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } /* Check if there are no dead threads: */ if (TAILQ_FIRST(&_dead_list) == NULL) { /* Output a record: */ strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n"); _thread_sys_write(fd, s, strlen(s)); } else { /* Output a header for dead threads: */ strcpy(s, "\n\nDEAD THREADS\n\n"); _thread_sys_write(fd, s, strlen(s)); /* * Enter a loop to report each thread in the global - * dead thread list: + * dead thread list: */ TAILQ_FOREACH(pthread, &_dead_list, dle) { - /* Output a record for the current thread: */ - snprintf(s, sizeof(s), - "Thread %p prio %3d [%s:%d]\n", - pthread, pthread->base_priority, - pthread->fname,pthread->lineno); - _thread_sys_write(fd, s, strlen(s)); + dump_thread(fd, pthread, /*long_version*/ 0); } } /* Output a header for file descriptors: */ - snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR TABLE (table size %d)\n\n",_thread_dtablesize); + snprintf(s, sizeof(s), "\n\n=============\nFILE DESCRIPTOR " + "TABLE (table size %d)\n\n", _thread_dtablesize); _thread_sys_write(fd, s, strlen(s)); /* Enter a loop to report file descriptor lock usage: */ for (i = 0; i < _thread_dtablesize; i++) { /* * Check if memory is allocated for this file - * descriptor: + * descriptor: */ if (_thread_fd_table[i] != NULL) { /* Report the file descriptor lock status: */ snprintf(s, sizeof(s), - "fd[%3d] read owner %p count %d [%s:%d]\n write owner %p count %d [%s:%d]\n", - i, - _thread_fd_table[i]->r_owner, - _thread_fd_table[i]->r_lockcount, - _thread_fd_table[i]->r_fname, - _thread_fd_table[i]->r_lineno, - _thread_fd_table[i]->w_owner, - _thread_fd_table[i]->w_lockcount, - _thread_fd_table[i]->w_fname, - _thread_fd_table[i]->w_lineno); - _thread_sys_write(fd, s, strlen(s)); + "fd[%3d] read owner %p count %d [%s:%d]\n" + " write owner %p count %d [%s:%d]\n", + i, 
_thread_fd_table[i]->r_owner, + _thread_fd_table[i]->r_lockcount, + _thread_fd_table[i]->r_fname, + _thread_fd_table[i]->r_lineno, + _thread_fd_table[i]->w_owner, + _thread_fd_table[i]->w_lockcount, + _thread_fd_table[i]->w_fname, + _thread_fd_table[i]->w_lineno); + _thread_sys_write(fd, s, strlen(s)); } } /* Close the dump file: */ _thread_sys_close(fd); + } +} + +static void +dump_thread(int fd, pthread_t pthread, int long_version) +{ + char s[512]; + int i; + + /* Find the state: */ + for (i = 0; i < NELEMENTS(thread_info) - 1; i++) + if (thread_info[i].state == pthread->state) + break; + + /* Output a record for the thread: */ + snprintf(s, sizeof(s), + "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n", + pthread, (pthread->name == NULL) ? "" : pthread->name, + pthread->active_priority, thread_info[i].name, pthread->fname, + pthread->lineno); + _thread_sys_write(fd, s, strlen(s)); + + if (long_version != 0) { + /* Check if this is the running thread: */ + if (pthread == _thread_run) { + /* Output a record for the running thread: */ + strcpy(s, "This is the running thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Check if this is the initial thread: */ + if (pthread == _thread_initial) { + /* Output a record for the initial thread: */ + strcpy(s, "This is the initial thread\n"); + _thread_sys_write(fd, s, strlen(s)); + } + /* Process according to thread state: */ + switch (pthread->state) { + /* File descriptor read lock wait: */ + case PS_FDLR_WAIT: + case PS_FDLW_WAIT: + case PS_FDR_WAIT: + case PS_FDW_WAIT: + /* Write the lock details: */ + snprintf(s, sizeof(s), "fd %d[%s:%d]", + pthread->data.fd.fd, + pthread->data.fd.fname, + pthread->data.fd.branch); + _thread_sys_write(fd, s, strlen(s)); + snprintf(s, sizeof(s), "owner %pr/%pw\n", + _thread_fd_table[pthread->data.fd.fd]->r_owner, + _thread_fd_table[pthread->data.fd.fd]->w_owner); + _thread_sys_write(fd, s, strlen(s)); + break; + case PS_SIGWAIT: + snprintf(s, sizeof(s), "sigmask (hi)"); + _thread_sys_write(fd, s, strlen(s)); + for (i = _SIG_WORDS - 1; i >= 0; i--) { + snprintf(s, sizeof(s), "%08x\n", + pthread->sigmask.__bits[i]); + _thread_sys_write(fd, s, strlen(s)); + } + snprintf(s, sizeof(s), "(lo)\n"); + _thread_sys_write(fd, s, strlen(s)); + break; + /* + * Trap other states that are not explicitly + * coded to dump information: + */ + default: + /* Nothing to do here. */ + break; + } } } /* Set the thread name for debug: */ void pthread_set_name_np(pthread_t thread, char *name) { /* Check if the caller has specified a valid thread: */ if (thread != NULL && thread->magic == PTHREAD_MAGIC) { if (thread->name != NULL) { /* Free space for previous name. */ free(thread->name); } thread->name = strdup(name); } } #endif Index: head/lib/libpthread/thread/thr_init.c =================================================================== --- head/lib/libpthread/thread/thr_init.c (revision 68515) +++ head/lib/libpthread/thread/thr_init.c (revision 68516) @@ -1,425 +1,421 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include #include "pthread_private.h" #ifdef GCC_2_8_MADE_THREAD_AWARE typedef void *** (*dynamic_handler_allocator)(); extern void __set_dynamic_handler_allocator(dynamic_handler_allocator); static pthread_key_t except_head_key; typedef struct { void **__dynamic_handler_chain; void *top_elt[2]; } except_struct; static void ***dynamic_allocator_handler_fn() { except_struct *dh = (except_struct *)pthread_getspecific(except_head_key); if(dh == NULL) { dh = (except_struct *)malloc( sizeof(except_struct) ); memset(dh, '\0', sizeof(except_struct)); dh->__dynamic_handler_chain= dh->top_elt; pthread_setspecific(except_head_key, (void *)dh); } return &dh->__dynamic_handler_chain; } #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; struct clockinfo clockinfo; struct sigaction act; - struct itimerval itimer; + struct sigaltstack alt; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = _thread_sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (_thread_sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); if (_thread_sys_dup2(fd,0) == -1 || _thread_sys_dup2(fd,1) == -1 || _thread_sys_dup2(fd,2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them: */ for (i = 0; i < 3; i++) if (((_pthread_stdio_flags[i] = _thread_sys_fcntl(i,F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); /* * Create a pipe that is written to by the signal handler to prevent - * signals being missed in calls to _select: + * signals being missed in calls to _select: */ if (_thread_sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } /* Get the flags for the read pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = _thread_sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel write pipe non-blocking"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so - * abort: + * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ - else if ((_thread_kern_sched_stack = malloc(PAGE_SIZE * 10)) == NULL) + else if ((_thread_kern_sched_stack = malloc(SCHED_STACK_SIZE)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Initialize the thread stack cache: */ SLIST_INIT(&_stackq); /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the parameters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond.
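 *
 * This is the usual guard-page idiom; a sketch with hypothetical
 * names, assuming only mmap(2):
 *
 *	guard = mmap(stack_bottom - guard_size, guard_size,
 *	    PROT_NONE, MAP_ANON, -1, 0);
 *
 * Any access to the zone then faults instead of silently
 * corrupting the adjacent stack; the protection value of 0
 * passed below is PROT_NONE by another name.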
*/ if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = (void *) USRSTACK - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); - SET_STACK_JB(_thread_kern_sched_jb, - _thread_kern_sched_stack + PAGE_SIZE*10 - sizeof(double)); + SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + + SCHED_STACK_SIZE - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Initialise the queue: */ TAILQ_INIT(&(_thread_initial->join_queue)); /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; - /* Initialize the initial signal frame: */ - _thread_initial->sigframes[0] = &_thread_initial->sigframe0; - _thread_initial->curframe = &_thread_initial->sigframe0; - _thread_initial->curframe->ctxtype = CTX_JB_NOSIG; - /* Set the base of the stack: */ - _thread_initial->curframe->stackp = (unsigned long) USRSTACK; + /* Initialize the initial context: */ + _thread_initial->curframe = NULL; + _thread_initial->ctxtype = CTX_JB_NOSIG; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; _thread_initial->specific_data = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _thread_run = _thread_initial; /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; - act.sa_flags = SA_SIGINFO; + act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); + /* Create and install an alternate signal stack: */ + alt.ss_sp = malloc(SIGSTKSZ); /* recommended stack size */ + alt.ss_size = SIGSTKSZ; + alt.ss_flags = 0; + if (_thread_sys_sigaltstack(&alt, NULL) != 0) + PANIC("Unable to install alternate signal stack"); + /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (_thread_sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal - * 
initialisation fails: + * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 || _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* - * Abort this process if signal initialisation fails: + * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ _thread_sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort - * this process. + * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor - * table, so abort this process. + * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor - * table: + * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } - - /* Install the scheduling timer: */ - itimer.it_interval.tv_sec = 0; - itimer.it_interval.tv_usec = _clock_res_usec; - itimer.it_value = itimer.it_interval; - if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) - PANIC("Cannot set interval timer"); - } } #ifdef GCC_2_8_MADE_THREAD_AWARE /* Create the thread-specific data for the exception linked list. */ if(pthread_key_create(&except_head_key, NULL) != 0) PANIC("Failed to create thread specific exception head"); /* Setup the gcc exception handler per thread. */ __set_dynamic_handler_allocator( dynamic_allocator_handler_fn ); #endif /* GCC_2_8_MADE_THREAD_AWARE */ /* Initialise the garbage collector mutex and condition variable.
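 *
 * These two objects form the library's internal producer/consumer
 * pair: pthread_exit() links a dying thread onto _dead_list and
 * signals _gc_cond, and the garbage collector thread waits on
 * that condition variable to reclaim dead threads' stacks and
 * structures.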
*/ if (pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* - * Special start up code for NetBSD/Alpha + * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) -int +int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif #else /* * A stub for non-threaded programs. */ void _thread_init(void) { } #endif Index: head/lib/libpthread/thread/thr_join.c =================================================================== --- head/lib/libpthread/thread/thr_join.c (revision 68515) +++ head/lib/libpthread/thread/thr_join.c (revision 68516) @@ -1,138 +1,166 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_join(pthread_t pthread, void **thread_return) { int ret = 0; _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) { /* Invalid thread: */ _thread_leave_cancellation_point(); return(EINVAL); } /* Check if the caller has specified itself: */ if (pthread == _thread_run) { /* Avoid a deadlock condition: */ _thread_leave_cancellation_point(); return(EDEADLK); } /* * Find the thread in the list of active threads or in the * list of dead threads: */ if ((_find_thread(pthread) != 0) && (_find_dead_thread(pthread) != 0)) /* Return an error: */ ret = ESRCH; /* Check if this thread has been detached: */ else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) /* Return an error: */ ret = ESRCH; /* Check if the thread is not dead: */ else if (pthread->state != PS_DEAD) { PTHREAD_ASSERT_NOT_IN_SYNCQ(_thread_run); - /* Clear the interrupted flag: */ - _thread_run->interrupted = 0; - /* - * Protect against being context switched out while - * adding this thread to the join queue. + * Enter a loop in case this thread is woken prematurely + * in order to invoke a signal handler: */ - _thread_kern_sig_defer(); + for (;;) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; - /* Add the running thread to the join queue: */ - TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; - _thread_run->data.thread = pthread; + /* + * Protect against being context switched out while + * adding this thread to the join queue. + */ + _thread_kern_sig_defer(); - /* Schedule the next thread: */ - _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); + /* Add the running thread to the join queue: */ + TAILQ_INSERT_TAIL(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags |= PTHREAD_FLAGS_IN_JOINQ; + _thread_run->data.thread = pthread; - if (_thread_run->interrupted != 0) { - TAILQ_REMOVE(&(pthread->join_queue), _thread_run, sqe); - _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; - } - _thread_run->data.thread = NULL; + /* Schedule the next thread: */ + _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); - _thread_kern_sig_undefer(); + if ((_thread_run->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { + TAILQ_REMOVE(&(pthread->join_queue), + _thread_run, sqe); + _thread_run->flags &= ~PTHREAD_FLAGS_IN_JOINQ; + } + _thread_run->data.thread = NULL; - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation(_thread_run); + _thread_kern_sig_undefer(); - /* Check if the thread is not detached: */ - if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { - /* Check if the return value is required: */ - if (thread_return) - /* Return the thread's return value: */ - *thread_return = pthread->ret; - } - else - /* Return an error: */ - ret = ESRCH; + if (_thread_run->interrupted != 0) { + if (_thread_run->continuation != NULL) + _thread_run->continuation(_thread_run); + /* + * This thread was interrupted, probably to + * invoke a signal handler. Make sure the + * target thread is still joinable. + */ + if (((_find_thread(pthread) != 0) && + (_find_dead_thread(pthread) != 0)) || + ((pthread->attr.flags & + PTHREAD_DETACHED) != 0)) { + /* Return an error: */ + ret = ESRCH; + /* We're done; break out of the loop. */ + break; + } + else if (pthread->state == PS_DEAD) { + /* We're done; break out of the loop. 
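 * (The surrounding retry loop exists because a signal handler
 * may run on this thread while it sleeps in PS_JOIN; by the time
 * it resumes, the target may have been detached or reaped, so
 * joinability and ESRCH are re-derived on each iteration rather
 * than cached from before the sleep.)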
*/ + break; + } + } else { + /* + * The thread return value and error are set + * by the thread we're joining to when it + * exits or detaches: + */ + ret = _thread_run->error; + if ((ret == 0) && (thread_return != NULL)) + *thread_return = _thread_run->ret; + + /* We're done; break out of the loop. */ + break; + } + } /* Check if the return value is required: */ } else if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; _thread_leave_cancellation_point(); /* Return the completion status: */ return (ret); } void _join_backout(pthread_t pthread) { _thread_kern_sig_defer(); - if (pthread->state == PS_JOIN) { + if ((pthread->flags & PTHREAD_FLAGS_IN_JOINQ) != 0) { TAILQ_REMOVE(&pthread->data.thread->join_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_JOINQ; } _thread_kern_sig_undefer(); } #endif Index: head/lib/libpthread/thread/thr_kern.c =================================================================== --- head/lib/libpthread/thread/thr_kern.c (revision 68515) +++ head/lib/libpthread/thread/thr_kern.c (revision 68516) @@ -1,1108 +1,1111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* #define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* Static function prototype definitions: */ -static void +static void thread_kern_poll(int wait_reqd); static void dequeue_signals(void); static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in); /* Static variables: */ static int last_tick = 0; /* * This is called when a signal handler finishes and wants to * return to a previous frame.
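 *
 * In the reworked scheme the signal handler hands back the
 * struct pthread_signal_frame it saved, and
 * _thread_sigframe_restore() reinstates that interrupted
 * context; the old fixed sigframes[] array indexed by frame
 * number is gone.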
*/ void -_thread_kern_sched_frame(int frame) +_thread_kern_sched_frame(struct pthread_signal_frame *psf) { /* * Flag the pthread kernel as executing scheduler code * to avoid a signal from interrupting this execution and * corrupting the (soon-to-be) current frame. */ _thread_kern_in_sched = 1; - /* Return to the specified frame: */ - _thread_run->curframe = _thread_run->sigframes[frame]; - _thread_run->sigframe_count = frame; + /* Restore the signal frame: */ + _thread_sigframe_restore(_thread_run, psf); - if (_thread_run->sigframe_count == 0) - /* Restore the threads priority: */ - _thread_run->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; - /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } void _thread_kern_sched(ucontext_t *scp) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* Check if this function was called from the signal handler: */ if (scp != NULL) { /* * The signal handler should have saved the state of * the current thread. Restore the process signal * mask. */ if (_thread_sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL) != 0) PANIC("Unable to restore process mask after signal"); /* * We're running on the signal stack; just call the * kernel scheduler directly. */ DBG_MSG("Entering scheduler due to signal\n"); _thread_kern_scheduler(); } else { /* Save the state of the current thread: */ - if (_setjmp(_thread_run->curframe->ctx.jb) == 0) { + if (_setjmp(_thread_run->ctx.jb) == 0) { /* Flag the jump buffer was the last state saved: */ - _thread_run->curframe->ctxtype = CTX_JB_NOSIG; - _thread_run->curframe->longjmp_val = 1; + _thread_run->ctxtype = CTX_JB_NOSIG; + _thread_run->longjmp_val = 1; } else { DBG_MSG("Returned from ___longjmp, thread %p\n", _thread_run); /* * This point is reached when a longjmp() is called - * to restore the state of a thread. + * to restore the state of a thread. * * This is the normal way out of the scheduler. */ _thread_kern_in_sched = 0; if (_thread_run->sig_defer_count == 0) { if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) - /* + /* * Cancellations override signals. * * Stick a cancellation point at the * start of each async-cancellable * thread's resumption. * * We allow threads woken at cancel * points to do their own checks. */ pthread_testcancel(); } if (_sched_switch_hook != NULL) { /* Run the installed switch hook: */ thread_run_switch_hook(_last_user_thread, _thread_run); } return; } /* Switch to the thread scheduler: */ ___longjmp(_thread_kern_sched_jb, 1); } } void _thread_kern_sched_sig(void) { _thread_run->check_pending = 1; _thread_kern_sched(NULL); } void _thread_kern_scheduler(void) { - struct pthread_signal_frame *psf; struct timespec ts; struct timeval tv; pthread_t pthread, pthread_h; unsigned int current_tick; int add_to_prioq; /* If the currently running thread is a user thread, save it: */ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0) _last_user_thread = _thread_run; /* Are there pending signals for this thread? */ if (_thread_run->check_pending != 0) { _thread_run->check_pending = 0; _thread_sig_check_pending(_thread_run); } /* * Enter a scheduling loop that finds the next thread that is * ready to run. 
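 * (In outline, each pass requeues the thread that just ran, polls file
 * descriptors when necessary, wakes threads whose timeouts have
 * expired, and then resumes the highest priority runnable thread.)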
This loop completes when there are no more threads * in the global list or when a thread has its state restored by * either a sigreturn (if the state was saved as a sigcontext) or a - * longjmp (if the state was saved by a setjmp). + * longjmp (if the state was saved by a setjmp). */ while (!(TAILQ_EMPTY(&_thread_list))) { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); current_tick = _sched_ticks; /* * Protect the scheduling queues from access by the signal * handler. */ _queue_signals = 1; add_to_prioq = 0; if (_thread_run != &_thread_kern_thread) { /* * This thread no longer needs to yield the CPU. */ _thread_run->yield_on_sig_undefer = 0; if (_thread_run->state != PS_RUNNING) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive += UINT_MAX + 1; } } /* * Place the currently running thread into the * appropriate queue(s). */ switch (_thread_run->state) { case PS_DEAD: case PS_STATE_MAX: /* to silence -Wall */ case PS_SUSPENDED: /* * Dead and suspended threads are not placed * in any queue: */ break; case PS_RUNNING: /* * Runnable threads can't be placed in the * priority queue until after waiting threads * are polled (to preserve round-robin * scheduling). */ add_to_prioq = 1; break; /* * States which do not depend on file descriptor I/O - * operations or timeouts: + * operations or timeouts: */ case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: case PS_SIGSUSPEND: case PS_SIGTHREAD: case PS_SIGWAIT: case PS_WAIT_WAIT: /* No timeouts for these states: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States which can timeout: */ case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); break; /* States that require periodic work: */ case PS_SPINBLOCK: /* No timeouts for this state: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; /* Increment spinblock count: */ _spinblock_count++; /* FALLTHROUGH */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Restart the time slice: */ _thread_run->slice_usec = -1; /* Insert into the waiting queue: */ PTHREAD_WAITQ_INSERT(_thread_run); /* Insert into the work queue: */ PTHREAD_WORKQ_INSERT(_thread_run); break; } } /* + * Avoid polling file descriptors if there are none + * waiting: + */ + if (TAILQ_EMPTY(&_workq) != 0) { + } + /* * Poll file descriptors only if a new scheduling signal * has occurred or if we have no more runnable threads. */ - if (((current_tick = _sched_ticks) != last_tick) || + else if (((current_tick = _sched_ticks) != last_tick) || ((_thread_run->state != PS_RUNNING) && (PTHREAD_PRIOQ_FIRST() == NULL))) { /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Poll file descriptors to update the state of threads - * waiting on file I/O where data may be available: + * waiting on file I/O where data may be available: */ thread_kern_poll(0); /* Protect the scheduling queues: */ _queue_signals = 1; } last_tick = current_tick; /* * Wake up threads that have timed out.
This has to be * done after polling in case a thread does a poll or * select with zero time. */ PTHREAD_WAITQ_SETACTIVE(); while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) && (pthread->wakeup_time.tv_sec != -1) && (((pthread->wakeup_time.tv_sec == 0) && (pthread->wakeup_time.tv_nsec == 0)) || (pthread->wakeup_time.tv_sec < ts.tv_sec) || ((pthread->wakeup_time.tv_sec == ts.tv_sec) && (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) { switch (pthread->state) { case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Return zero file descriptors ready: */ pthread->data.poll_data->nfds = 0; /* fall through */ default: /* * Remove this thread from the waiting queue * (and work queue if necessary) and place it * in the ready queue. */ PTHREAD_WAITQ_CLEARACTIVE(); if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); break; } /* * Flag the timeout in the thread structure: */ pthread->timeout = 1; } PTHREAD_WAITQ_CLEARACTIVE(); /* * Check to see if the current thread needs to be added * to the priority queue: */ if (add_to_prioq != 0) { /* * Save the current time as the time that the - * thread became inactive: + * thread became inactive: */ current_tick = _sched_ticks; _thread_run->last_inactive = (long)current_tick; if (_thread_run->last_inactive < _thread_run->last_active) { /* Account for a rollover: */ _thread_run->last_inactive += UINT_MAX + 1; } if ((_thread_run->slice_usec != -1) && (_thread_run->attr.sched_policy != SCHED_FIFO)) { /* * Accumulate the number of microseconds for * which the current thread has run: */ _thread_run->slice_usec += (_thread_run->last_inactive - _thread_run->last_active) * (long)_clock_res_usec; /* Check for time quantum exceeded: */ if (_thread_run->slice_usec > TIMESLICE_USEC) _thread_run->slice_usec = -1; } if (_thread_run->slice_usec == -1) { /* * The thread exceeded its time * quantum or it yielded the CPU; * place it at the tail of the * queue for its priority. */ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run); } else { /* * The thread hasn't exceeded its * interval. Place it at the head * of the queue for its priority. */ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run); } } /* * Get the highest priority thread in the ready queue. */ pthread_h = PTHREAD_PRIOQ_FIRST(); /* Check if there are no threads ready to run: */ if (pthread_h == NULL) { /* * Lock the pthread kernel by changing the pointer to * the running thread to point to the global kernel - * thread structure: + * thread structure: */ _thread_run = &_thread_kern_thread; DBG_MSG("No runnable threads, using kernel thread %p\n", _thread_run); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * There are no threads ready to run, so wait until - * something happens that changes this condition: + * something happens that changes this condition: */ thread_kern_poll(1); /* * This process' usage will likely be very small * while waiting in a poll. Since the scheduling * clock is based on the profiling timer, it is * unlikely that the profiling timer will fire * and update the time of day. To account for this, * get the time of day after polling with a timeout.
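 * (The profiling timer is _ITIMER_SCHED_TIMER/_SCHED_SIGNAL in the
 * private header; in the default configuration that is ITIMER_PROF,
 * which only advances while the process is consuming CPU time.)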
*/ gettimeofday((struct timeval *) &_sched_tod, NULL); /* Check once more for a runnable thread: */ _queue_signals = 1; pthread_h = PTHREAD_PRIOQ_FIRST(); _queue_signals = 0; } if (pthread_h != NULL) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread_h); /* Unprotect the scheduling queues: */ _queue_signals = 0; /* * Check for signals queued while the scheduling * queues were protected: */ while (_sigq_check_reqd != 0) { /* Clear before handling queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues again: */ _queue_signals = 1; dequeue_signals(); /* * Check for a higher priority thread that * became runnable due to signal handling. */ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) && (pthread->active_priority > pthread_h->active_priority)) { /* Remove the thread from the ready queue: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * Insert the lower priority thread * at the head of its priority list: */ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h); /* There's a new thread in town: */ pthread_h = pthread; } /* Unprotect the scheduling queues: */ _queue_signals = 0; } /* Make the selected thread the current thread: */ _thread_run = pthread_h; /* * Save the current time as the time that the thread - * became active: + * became active: */ current_tick = _sched_ticks; _thread_run->last_active = (long) current_tick; /* * Check if this thread is running for the first time * or running again after using its full time slice - * allocation: + * allocation: */ if (_thread_run->slice_usec == -1) { /* Reset the accumulated time slice period: */ _thread_run->slice_usec = 0; } /* * If we had a context switch, run any * installed switch hooks. */ if ((_sched_switch_hook != NULL) && (_last_user_thread != _thread_run)) { thread_run_switch_hook(_last_user_thread, _thread_run); } /* * Continue the thread at its current frame: */ - psf = _thread_run->curframe; - switch(psf->ctxtype) { + switch(_thread_run->ctxtype) { case CTX_JB_NOSIG: - ___longjmp(psf->ctx.jb, psf->longjmp_val); + ___longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_JB: - __longjmp(psf->ctx.jb, psf->longjmp_val); + __longjmp(_thread_run->ctx.jb, + _thread_run->longjmp_val); break; case CTX_SJB: - __siglongjmp(psf->ctx.sigjb, psf->longjmp_val); + __siglongjmp(_thread_run->ctx.sigjb, + _thread_run->longjmp_val); break; case CTX_UC: /* XXX - Restore FP registers? */ - FP_RESTORE_UC(&psf->ctx.uc); + FP_RESTORE_UC(&_thread_run->ctx.uc); /* * Do a sigreturn to restart the thread that - * was interrupted by a signal: + * was interrupted by a signal: */ _thread_kern_in_sched = 0; #if NOT_YET - _setcontext(&psf->ctx.uc); + _setcontext(&_thread_run->ctx.uc); #else /* * Ensure the process signal mask is set * correctly: */ - psf->ctx.uc.uc_sigmask = _process_sigmask; - _thread_sys_sigreturn(&psf->ctx.uc); + _thread_run->ctx.uc.uc_sigmask = + _process_sigmask; + _thread_sys_sigreturn(&_thread_run->ctx.uc); #endif break; } /* This point should not be reached. */ PANIC("Thread has returned from sigreturn or longjmp"); } } /* There are no more threads, so exit this process: */ exit(0); } void _thread_kern_sched_state(enum pthread_state state, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and it is placed into the proper queue.
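 *
 * (Typical use, as in the join code above: a blocking primitive parks
 * the current thread with
 *
 *	_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
 *
 * and the call returns only after the scheduler hands the CPU back to
 * this thread.)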
*/ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno) { /* * Flag the pthread kernel as executing scheduler code * to avoid a scheduler signal from interrupting this * execution and calling the scheduler again. */ _thread_kern_in_sched = 1; /* * Prevent the signal handler from fiddling with this thread * before its state is set and it is placed into the proper * queue(s). */ _queue_signals = 1; /* Change the state of the current thread: */ _thread_run->state = state; _thread_run->fname = fname; _thread_run->lineno = lineno; _SPINUNLOCK(lock); /* Schedule the next thread that is ready: */ _thread_kern_sched(NULL); } static void thread_kern_poll(int wait_reqd) { int count = 0; int i, found; int kern_pipe_added = 0; int nfds = 0; int timeout_ms = 0; struct pthread *pthread; struct timespec ts; struct timeval tv; /* Check if the caller wants to wait: */ if (wait_reqd == 0) { timeout_ms = 0; } else { /* Get the current time of day: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, &ts); _queue_signals = 1; pthread = TAILQ_FIRST(&_waitingq); _queue_signals = 0; if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) { /* * Either there are no threads in the waiting queue, * or there are no threads that can timeout. */ timeout_ms = INFTIM; } else { /* * Calculate the time left for the next thread to * timeout: */ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) * 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) / 1000000); /* * Don't allow negative timeouts: */ if (timeout_ms < 0) timeout_ms = 0; } } /* Protect the scheduling queues: */ _queue_signals = 1; /* * Check to see if the signal queue needs to be walked to look * for threads awoken by a signal while in the scheduler. */ if (_sigq_check_reqd != 0) { /* Reset flag before handling queued signals: */ _sigq_check_reqd = 0; dequeue_signals(); } /* * Check for a thread that became runnable due to a signal: */ if (PTHREAD_PRIOQ_FIRST() != NULL) { /* * Since there is at least one runnable thread, * disable the wait. */ timeout_ms = 0; } /* * Form the poll table: */ nfds = 0; if (timeout_ms != 0) { /* Add the kernel pipe to the poll table: */ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0]; _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].revents = 0; nfds++; kern_pipe_added = 1; } PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. */ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* One less thread in a spinblock state: */ _spinblock_count--; /* * Since there is at least one runnable * thread, disable the wait. 
*/ timeout_ms = 0; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLRDNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor write wait: */ case PS_FDW_WAIT: /* Limit number of polled files to table size: */ if (nfds < _thread_dtablesize) { _thread_pfd_table[nfds].events = POLLWRNORM; _thread_pfd_table[nfds].fd = pthread->data.fd.fd; nfds++; } break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Limit number of polled files to table size: */ if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { for (i = 0; i < pthread->data.poll_data->nfds; i++) { _thread_pfd_table[nfds + i].fd = pthread->data.poll_data->fds[i].fd; _thread_pfd_table[nfds + i].events = pthread->data.poll_data->fds[i].events; } nfds += pthread->data.poll_data->nfds; } break; /* Other states do not depend on file I/O. */ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); /* * Wait for a file descriptor to be ready for read, write, or - * an exception, or a timeout to occur: + * an exception, or a timeout to occur: */ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms); if (kern_pipe_added != 0) /* * Remove the pthread kernel pipe file descriptor - * from the pollfd table: + * from the pollfd table: */ nfds = 1; else nfds = 0; /* * Check if it is possible that there are bytes in the kernel * read pipe waiting to be read: */ if (count < 0 || ((kern_pipe_added != 0) && (_thread_pfd_table[0].revents & POLLRDNORM))) { /* * If the kernel read pipe was included in the - * count: + * count: */ if (count > 0) { /* Decrement the count of file descriptors: */ count--; } if (_sigq_check_reqd != 0) { /* Reset flag before handling signals: */ _sigq_check_reqd = 0; dequeue_signals(); } } /* * Check if any file descriptors are ready: */ if (count > 0) { /* * Enter a loop to look for threads waiting on file * descriptors that are flagged as available by the - * _poll syscall: + * _poll syscall: */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { switch (pthread->state) { case PS_SPINBLOCK: /* * If the lock is available, let the thread run. 
*/ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } break; /* File descriptor read wait: */ case PS_FDR_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLRDNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor write wait: */ case PS_FDW_WAIT: if ((nfds < _thread_dtablesize) && (_thread_pfd_table[nfds].revents & POLLWRNORM)) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } nfds++; break; /* File descriptor poll or select wait: */ case PS_POLL_WAIT: case PS_SELECT_WAIT: if (pthread->data.poll_data->nfds + nfds < _thread_dtablesize) { /* * Enter a loop looking for I/O * readiness: */ found = 0; for (i = 0; i < pthread->data.poll_data->nfds; i++) { if (_thread_pfd_table[nfds + i].revents != 0) { pthread->data.poll_data->fds[i].revents = _thread_pfd_table[nfds + i].revents; found++; } } /* Increment before destroying: */ nfds += pthread->data.poll_data->nfds; if (found != 0) { pthread->data.poll_data->nfds = found; PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); } } else nfds += pthread->data.poll_data->nfds; break; /* Other states do not depend on file I/O. */ default: break; } } PTHREAD_WAITQ_CLEARACTIVE(); } else if (_spinblock_count != 0) { /* * Enter a loop to look for threads waiting on a spinlock * that is now available. */ PTHREAD_WAITQ_SETACTIVE(); TAILQ_FOREACH(pthread, &_workq, qe) { if (pthread->state == PS_SPINBLOCK) { /* * If the lock is available, let the thread run. 
*/ if (pthread->data.spinlock->access_lock == 0) { PTHREAD_WAITQ_CLEARACTIVE(); PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread,PS_RUNNING); PTHREAD_WAITQ_SETACTIVE(); /* * One less thread in a spinblock state: */ _spinblock_count--; } } } PTHREAD_WAITQ_CLEARACTIVE(); } /* Unprotect the scheduling queues: */ _queue_signals = 0; while (_sigq_check_reqd != 0) { /* Handle queued signals: */ _sigq_check_reqd = 0; /* Protect the scheduling queues: */ _queue_signals = 1; dequeue_signals(); /* Unprotect the scheduling queues: */ _queue_signals = 0; } } void _thread_kern_set_timeout(const struct timespec * timeout) { struct timespec current_time; struct timeval tv; /* Reset the timeout flag for the running thread: */ _thread_run->timeout = 0; /* Check if the thread is to wait forever: */ if (timeout == NULL) { /* * Set the wakeup time to something that can be recognised as - * different to an actual time of day: + * different to an actual time of day: */ _thread_run->wakeup_time.tv_sec = -1; _thread_run->wakeup_time.tv_nsec = -1; } /* Check if no waiting is required: */ else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) { /* Set the wake up time to 'immediately': */ _thread_run->wakeup_time.tv_sec = 0; _thread_run->wakeup_time.tv_nsec = 0; } else { /* Get the current time: */ GET_CURRENT_TOD(tv); TIMEVAL_TO_TIMESPEC(&tv, ¤t_time); /* Calculate the time for the current thread to wake up: */ _thread_run->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec; _thread_run->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec; /* Check if the nanosecond field needs to wrap: */ if (_thread_run->wakeup_time.tv_nsec >= 1000000000) { /* Wrap the nanosecond field: */ _thread_run->wakeup_time.tv_sec += 1; _thread_run->wakeup_time.tv_nsec -= 1000000000; } } } void _thread_kern_sig_defer(void) { /* Allow signal deferral to be recursive. */ _thread_run->sig_defer_count++; } void _thread_kern_sig_undefer(void) { /* * Perform checks to yield only if we are about to undefer * signals. */ if (_thread_run->sig_defer_count > 1) { /* Decrement the signal deferral count. */ _thread_run->sig_defer_count--; } else if (_thread_run->sig_defer_count == 1) { /* Reenable signals: */ _thread_run->sig_defer_count = 0; /* * Check if there are queued signals: */ if (_sigq_check_reqd != 0) _thread_kern_sched(NULL); - /* + /* * Check for asynchronous cancellation before delivering any * pending signals: */ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); /* * If there are pending signals or this thread has * to yield the CPU, call the kernel scheduler: * * XXX - Come back and revisit the pending signal problem */ if ((_thread_run->yield_on_sig_undefer != 0) || SIGNOTEMPTY(_thread_run->sigpend)) { _thread_run->yield_on_sig_undefer = 0; _thread_kern_sched(NULL); } } } static void dequeue_signals(void) { char bufr[128]; int num; /* - * Enter a loop to clear the pthread kernel pipe: + * Enter a loop to clear the pthread kernel pipe: */ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) { } if ((num < 0) && (errno != EAGAIN)) { /* * The only error we should expect is if there is * no data to read. 
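 * (The pthread kernel pipe is assumed to be set non-blocking, so a
 * drained pipe surfaces as -1 with errno EAGAIN rather than as a read
 * that blocks.)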
*/ PANIC("Unable to read from thread kernel pipe"); } /* Handle any pending signals: */ _thread_sig_handle_pending(); } static inline void thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in) { pthread_t tid_out = thread_out; pthread_t tid_in = thread_in; if ((tid_out != NULL) && (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_out = NULL; if ((tid_in != NULL) && (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_in = NULL; if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) { /* Run the scheduler switch hook: */ _sched_switch_hook(tid_out, tid_in); } } #endif Index: head/lib/libpthread/thread/thr_mutex.c =================================================================== --- head/lib/libpthread/thread/thr_mutex.c (revision 68515) +++ head/lib/libpthread/thread/thr_mutex.c (revision 68516) @@ -1,1467 +1,1480 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Reset the mutex flags: */ pmutex->m_flags = 0; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. 
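 * (m_data.m_count is only meaningful for recursive mutexes, so the
 * other types simply leave it alone.)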
*/ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } int pthread_mutex_trylock(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. 
*/ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } int pthread_mutex_lock(pthread_mutex_t * mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) - ret = EINVAL; + return (EINVAL); /* * If the mutex is statically initialized, perform the dynamic * initialization: */ - else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + if ((*mutex == NULL) && + ((ret = init_static(mutex)) != 0)) + return (ret); + + /* Reset the interrupted flag: */ + _thread_run->interrupted = 0; + + /* + * Enter a loop waiting to become the mutex owner. We need a + * loop in case the waiting thread is interrupted by a signal + * to execute a signal handler. It is not (currently) possible + * to remain in the waiting queue while running a handler. + * Instead, the thread is interrupted and backed out of the + * waiting queue prior to executing the signal handler. + */ + while (((*mutex)->m_owner != _thread_run) && (ret == 0) && + (_thread_run->interrupted == 0)) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
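 *
 * (Sketch of the static path, assuming PTHREAD_MUTEX_INITIALIZER is a
 * null pointer in this implementation:
 *
 *	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *	pthread_mutex_lock(&m);
 *
 * the first lock attempt sees *mutex == NULL and funnels through
 * init_static() above, while the MUTEX_FLAGS_INITED test below covers
 * storage handed in by other means.)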
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } - /* Reset the interrupted flag: */ - _thread_run->interrupted = 0; - /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = _thread_run->active_priority; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; if (_thread_run->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (_thread_run->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = _thread_run; /* Track number of priority mutexes owned: */ _thread_run->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ _thread_run->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = _thread_run->inherited_priority; _thread_run->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&_thread_run->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == _thread_run) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, _thread_run); /* * Keep a pointer to the mutex this thread * is waiting on: */ _thread_run->data.mutex = *mutex; /* Clear any previous error: */ _thread_run->error = 0; /* * Unlock 
the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = _thread_run->error; _thread_run->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (_thread_run->interrupted != 0) mutex_queue_remove(*mutex, _thread_run); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); - - if (_thread_run->interrupted != 0 && - _thread_run->continuation != NULL) - _thread_run->continuation((void *) _thread_run); } + if (_thread_run->interrupted != 0 && + _thread_run->continuation != NULL) + _thread_run->continuation((void *) _thread_run); + /* Return the completion status: */ return (ret); } int pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? 
EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. */ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. 
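 * (Bookkeeping at the hand-off, restated: the mutex priority is set
 * from the dequeued owner's active priority, that owner's previous
 * inherited priority is parked in m_saved_prio, and the mutex priority
 * then becomes the owner's inherited priority until it unlocks in
 * turn.)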
*/ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != _thread_run) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ _thread_run->inherited_priority = (*mutex)->m_saved_prio; _thread_run->active_priority = MAX(_thread_run->inherited_priority, _thread_run->base_priority); /* * This thread now owns one less priority mutex. */ _thread_run->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority has * been lowered and/or this threads * priority has been raised subsequent * to this thread being queued on the * waiting list. */ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* * Unless the new owner of the mutex is * currently suspended, allow the owner * to run. If the thread is suspended, * make a note that the thread isn't in * a wait queue any more. */ if (((*mutex)->m_owner->state != PS_SUSPENDED)) { PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); } else { (*mutex)->m_owner->suspended = SUSP_NOWAIT; } } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritence mutex.
A change in a threads base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorities of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this threads change * in priority. This has the side effect of changing * the threads active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritence mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when the priority of a thread already in the mutex waiting queue * changes. */ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning threads * active priority(*). * * (*) Because the owning threads current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the threads saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the threads priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the threads priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the threads new priority has affected the * priority of the mutex.
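 * (Worked example of the propagation below: if T1 owns inheritance
 * mutex M1 and is itself blocked on M2, a boost to M1's priority
 * raises T1's active priority via mutex_rescan_owned(); T1 is then
 * re-sorted in M2's waiting queue, and if M2's priority rises as a
 * result the loop advances to M2's owner, repeating until some mutex
 * priority is left unchanged.)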
*/ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. */ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning threads active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritence * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritence * mutexes. This might be optimized by only placing * priority inheritence mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owners saved (inherited) priority to * reflect the priority of the previous mutex. */ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the threads inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritence mutexes. 
*/ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. */ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - if (pthread->state == PS_MUTEX_WAIT) { + if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ - mutex->m_owner->data.mutex = NULL; + pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. */ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } #endif Index: head/lib/libpthread/thread/thr_private.h =================================================================== --- head/lib/libpthread/thread/thr_private.h (revision 68515) +++ head/lib/libpthread/thread/thr_private.h (revision 68516) @@ -1,1450 +1,1457 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. */ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (long)(ra); \ (jb)[0]._jb[R_RA + 4] = 0; \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. 
*/ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ _thread_sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ _thread_sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). */ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
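stdout_debug() and stderr_debug() above format into a fixed buffer and then hand the bytes straight to _thread_sys_write() instead of going through stdio: the threads library wraps stdio, so printing a debug message through it could re-enter the very code being debugged. The same pattern as a plain function (fd_debug is a hypothetical name, and plain write(2) stands in for the library's unwrapped syscall):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Format into a bounded buffer, then emit it with one raw write(2). */
static void
fd_debug(int fd, const char *fmt, ...)
{
	char	buf[128];
	va_list	ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	(void)write(fd, buf, strlen(buf));
}

Usage matches the macros, e.g. fd_debug(2, "got signal %d\n", sig); messages longer than the 128-byte buffer are silently truncated, exactly as in the macros.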
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; + int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ - 0, _SPINLOCK_INITIALIZER } + 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 /* * Additional state for a thread suspended with pthread_suspend_np(). */ enum pthread_susp { SUSP_NO, /* Not suspended. */ SUSP_YES, /* Suspended. */ SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ SUSP_COND_WAIT /* Suspended, still in a condition queue. 
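This hunk's only functional change to the condition variable is the new c_seqno field (with a matching extra 0 in the static initializer). The consumers are not visible in this header, so the following is a plausible reading rather than the library's code: a per-CV sequence number lets a waiter that was pulled off the queue (for example, to run a signal handler) detect that a wakeup was issued in the meantime. A sketch with hypothetical names:

/* Illustrative condition-variable core; only the counter matters here. */
struct xcond {
	int	c_seqno;	/* advanced on every signal/broadcast */
};

/* Signaller side: record that a wakeup happened. */
static void
xcond_mark_wakeup(struct xcond *cv)
{
	cv->c_seqno++;
}

/*
 * Waiter side: `saved' was read before blocking.  A changed counter
 * means a wakeup arrived while the thread was off the queue, so the
 * wait must not simply be re-entered as if nothing had happened.
 */
static int
xcond_wakeup_since(const struct xcond *cv, int saved)
{
	return (cv->c_seqno != saved);
}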
*/ }; /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of red zone at the end of each stack. In actuality, this "red zone" is * merely an unmapped region, except in the case of the initial stack. Since * mmap() makes it possible to specify the maximum growth of a MAP_STACK region, * an unmapped gap between thread stacks achieves the same effect as explicitly * mapped red zones. */ #define PTHREAD_STACK_GUARD PAGE_SIZE /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 +/* Size of the scheduler stack: */ +#define SCHED_STACK_SIZE PAGE_SIZE + /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_key { spinlock_t lock; volatile int allocated; volatile int count; void (*destructor) (); }; struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. 
*/ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); +struct pthread_signal_frame; + struct pthread_state_data { - int psd_interrupted; + struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; - enum pthread_state psd_state; - int psd_flags; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; + enum pthread_state psd_state; + int psd_flags; + int psd_interrupted; + int psd_longjmp_val; + int psd_sigmask_seqno; + int psd_signo; + int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; /* * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(), * but they may also be sigjmp_buf and ucontext_t. When a thread is * interrupted by a signal, it's context is saved as a ucontext_t. An * application is also free to use [_]longjmp()/[_]siglongjmp() to jump * between contexts within the same thread. Future support will also * include setcontext()/getcontext(). * * Define an enumerated type that can identify the 4 different context * types. */ typedef enum { CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */ CTX_JB, /* context is jmp_buf (with saved sigset) */ CTX_SJB, /* context is sigjmp_buf (with saved sigset) */ CTX_UC /* context is ucontext_t (with saved sigset) */ } thread_context_t; /* * There are 2 basic contexts that a frame may contain at any * one time: * * o ctx - The context that the thread should return to after normal * completion of the signal handler. * o sig_jb - The context just before the signal handler is invoked. * Attempts at abnormal returns from user supplied signal handlers * will return back to the signal context to perform any necessary * cleanup. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; - /* Beginning (bottom) of threads stack frame for this signal. */ - unsigned long stackp; - /* * Threads return context; ctxtype identifies the type of context. * For signal frame 0, these point to the context storage area * within the pthread structure. When handling signals (frame > 0), * these point to a context storage area that is allocated off the * threads stack. */ union { jmp_buf jb; sigjmp_buf sigjb; ucontext_t uc; } ctx; thread_context_t ctxtype; int longjmp_val; - - /* Threads "jump out of signal handler" destination frame. */ - int dst_frame; - - /* - * Used to return back to the signal handling frame in case - * the application tries to change contexts from the handler. 
- */ - jmp_buf *sig_jb; - int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ + ucontext_t uc; + siginfo_t siginfo; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* - * Used for tracking delivery of nested signal handlers. - * Signal frame 0 is used for normal context (when no - * signal handlers are active for the thread). Frame - * 1 is used as the context for the first signal, and - * frames 2 .. NSIG-1 are used when additional signals - * arrive interrupting already active signal handlers. + * Threads return context; ctxtype identifies the type of context. */ - struct pthread_signal_frame *sigframes[NSIG]; - struct pthread_signal_frame sigframe0; + union { + jmp_buf jb; + sigjmp_buf sigjb; + ucontext_t uc; + } ctx; + thread_context_t ctxtype; + int longjmp_val; + + /* + * Used for tracking delivery of signal handlers. + */ struct pthread_signal_frame *curframe; - int sigframe_count; - int sigframe_done; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; enum pthread_susp suspended; thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; + int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* Join queue head and link for waiting threads: */ TAILQ_HEAD(join_head, pthread) join_queue; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for another thread to terminate * (the join queue above) * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex, condition variable, or join queue when it is * not being called from a signal handler. If a thread is a * member of one of these queues when a signal handler is invoked, * it must remain in the queue. 
For this reason, the links for * these queues must not be (re)used for other queues. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. */ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ #define PTHREAD_FLAGS_IN_JOINQ 0x0200 /* in join queue using sqe link */ #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_JOINQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; const void **specific_data; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* Spare thread stack. */ struct stack { SLIST_ENTRY(stack) qe; /* Queue entry for this stack. */ }; /* * Global variables for the uthread kernel. 
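The three priority bytes above keep one invariant: active_priority is always the maximum of base_priority and inherited_priority, with the PTHREAD_SIGNAL_PRIORITY class bit OR'ed in while a signal is being delivered (see the `active_priority |= PTHREAD_SIGNAL_PRIORITY' in the thr_sig.c hunks below). The recalculation, spelled out over a stand-in structure:

#define SIG_PRIO_BIT	0x20	/* value of PTHREAD_SIGNAL_PRIORITY */

struct prio_state {
	char	base_priority;		/* user-settable, 0..31 */
	char	inherited_priority;	/* from PI/PP mutexes, else 0 */
	char	active_priority;	/* what the scheduler compares */
};

/* Recompute after any change to the base or inherited priority. */
static void
recalc_active(struct prio_state *t, int delivering_signal)
{
	t->active_priority = (t->base_priority > t->inherited_priority) ?
	    t->base_priority : t->inherited_priority;
	if (delivering_signal)
		t->active_priority |= SIG_PRIO_BIT;
}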
*/ /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). */ SCLASS struct pthread * volatile _thread_single #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. */ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. */ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. 
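_sched_tod above is written only by the scheduling-signal handler, yet read lock-free by any thread through the GET_CURRENT_TOD() macro defined earlier; the macro's do/while re-reads until tv_sec is stable across the copy, so a handler firing mid-copy cannot leave the caller with a seconds field from one second and a microseconds field from another. The same retry idiom as a function (single-writer assumption; the variable name is a demo stand-in):

#include <sys/time.h>

static volatile struct timeval sched_tod_demo;	/* updated from a handler */

/*
 * Copy both fields, then retry if tv_sec moved underneath us.  If only
 * tv_usec changed, the pair still names an instant that really occurred
 * during the copy, so it is safe to return.
 */
static struct timeval
read_current_tod(void)
{
	struct timeval tv;

	do {
		tv.tv_sec = sched_tod_demo.tv_sec;
		tv.tv_usec = sched_tod_demo.tv_usec;
	} while (tv.tv_sec != sched_tod_demo.tv_sec);
	return (tv);
}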
*/ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; -SCLASS sigset_t _process_sigmask; +SCLASS sigset_t _process_sigmask +#ifdef GLOBAL_PTHREAD_PRIVATE += { {0, 0, 0, 0} } +#endif +; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. */ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Spare stack queue. Stacks of default size are cached in order to reduce * thread creation time. Spare stacks are used in LIFO order to increase cache * locality. */ SCLASS SLIST_HEAD(, stack) _stackq; /* * Base address of next unallocated default-size {stack, red zone}. Stacks are * allocated contiguously, starting below the bottom of the main stack. When a * new stack is created, a red zone is created (actually, the red zone is simply * left unmapped) below the bottom of the stack, such that the stack will not be * able to grow all the way to the top of the next stack. This isn't * fool-proof. It is possible for a stack to grow by a large amount, such that * it grows into the next stack, and as long as the memory within the red zone * is never accessed, nothing will prevent one thread stack from trouncing all * over the next. */ SCLASS void * _next_stack #ifdef GLOBAL_PTHREAD_PRIVATE /* main stack top - main stack size - stack size - (red zone + main stack red zone) */ = (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD) #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
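_next_stack's initializer above encodes the whole stack layout: start below the main stack and its guard, then hand out one default-size stack plus one unmapped red zone per thread, growing downward. The arithmetic in isolation, with illustrative constants (DEMO_USRSTACK is a made-up address, and the real code first reuses stacks cached on _stackq before carving a new one):

#include <stdint.h>

#define DEMO_USRSTACK	0xbfc00000UL	/* hypothetical top of main stack */
#define DEMO_INITIAL	0x100000UL	/* cf. PTHREAD_STACK_INITIAL */
#define DEMO_STKSIZE	65536UL		/* cf. PTHREAD_STACK_DEFAULT */
#define DEMO_GUARD	4096UL		/* cf. PTHREAD_STACK_GUARD */

/* First slot: below main stack + its red zone, plus our own red zone. */
static uintptr_t next_stack =
    DEMO_USRSTACK - DEMO_INITIAL - DEMO_STKSIZE - (2 * DEMO_GUARD);

/* Hand out the current slot, then step down past stack + red zone. */
static uintptr_t
alloc_stack_base(void)
{
	uintptr_t base = next_stack;

	next_stack -= DEMO_STKSIZE + DEMO_GUARD;
	return (base);	/* caller maps [base, base + DEMO_STKSIZE) only */
}

Because the guard gap is simply never mapped, overrunning a stack faults without any explicit mprotect() bookkeeping, at the cost the comment above concedes: a large enough overrun can leap the gap entirely.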
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); void _flockfile_backout(pthread_t); void _funlock_owned(pthread_t); void _join_backout(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); -void _thread_exit_finish(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); -void _thread_kern_sched_frame(int frame); +void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(pthread_t pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(pthread_t pthread, int sig); void _thread_sig_wrapper(void); -int _thread_sigframe_find(pthread_t pthread, void *stackp); +void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *); int _thread_sys_sigpending(sigset_t *); int _thread_sys_sigprocmask(int, const sigset_t *, sigset_t *); int _thread_sys_sigsuspend(const sigset_t *); int _thread_sys_siginterrupt(int, int); int _thread_sys_sigpause(int); int _thread_sys_sigreturn(ucontext_t *); +int _thread_sys_sigaltstack(const struct sigaltstack *, struct sigstack *); int _thread_sys_sigstack(const struct sigstack *, struct sigstack *); int _thread_sys_sigvec(int, struct sigvec *, struct sigvec *); void _thread_sys_psignal(unsigned int, const char *); void (*_thread_sys_signal(int, void 
(*)(int)))(int); /* #include */ #ifdef _SYS_STAT_H_ int _thread_sys_fchmod(int, mode_t); int _thread_sys_fstat(int, struct stat *); int _thread_sys_fchflags(int, u_long); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int _thread_sys_fstatfs(int, struct statfs *); #endif int _thread_sys_pipe(int *); /* #include */ #ifdef _SYS_SOCKET_H_ int _thread_sys_accept(int, struct sockaddr *, int *); int _thread_sys_bind(int, const struct sockaddr *, int); int _thread_sys_connect(int, const struct sockaddr *, int); int _thread_sys_getpeername(int, struct sockaddr *, int *); int _thread_sys_getsockname(int, struct sockaddr *, int *); int _thread_sys_getsockopt(int, int, int, void *, int *); int _thread_sys_listen(int, int); int _thread_sys_setsockopt(int, int, int, const void *, int); int _thread_sys_shutdown(int, int); int _thread_sys_socket(int, int, int); int _thread_sys_socketpair(int, int, int, int *); ssize_t _thread_sys_recv(int, void *, size_t, int); ssize_t _thread_sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *); ssize_t _thread_sys_recvmsg(int, struct msghdr *, int); ssize_t _thread_sys_send(int, const void *, size_t, int); ssize_t _thread_sys_sendmsg(int, const struct msghdr *, int); ssize_t _thread_sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int); #endif /* #include */ #ifdef _STDIO_H_ FILE *_thread_sys_fdopen(int, const char *); FILE *_thread_sys_fopen(const char *, const char *); FILE *_thread_sys_freopen(const char *, const char *, FILE *); FILE *_thread_sys_popen(const char *, const char *); FILE *_thread_sys_tmpfile(void); char *_thread_sys_ctermid(char *); char *_thread_sys_cuserid(char *); char *_thread_sys_fgetln(FILE *, size_t *); char *_thread_sys_fgets(char *, int, FILE *); char *_thread_sys_gets(char *); char *_thread_sys_tempnam(const char *, const char *); char *_thread_sys_tmpnam(char *); int _thread_sys_fclose(FILE *); int _thread_sys_feof(FILE *); int _thread_sys_ferror(FILE *); int _thread_sys_fflush(FILE *); int _thread_sys_fgetc(FILE *); int _thread_sys_fgetpos(FILE *, fpos_t *); int _thread_sys_fileno(FILE *); int _thread_sys_fprintf(FILE *, const char *, ...); int _thread_sys_fpurge(FILE *); int _thread_sys_fputc(int, FILE *); int _thread_sys_fputs(const char *, FILE *); int _thread_sys_fscanf(FILE *, const char *, ...); int _thread_sys_fseek(FILE *, long, int); int _thread_sys_fsetpos(FILE *, const fpos_t *); int _thread_sys_getc(FILE *); int _thread_sys_getchar(void); int _thread_sys_getw(FILE *); int _thread_sys_pclose(FILE *); int _thread_sys_printf(const char *, ...); int _thread_sys_putc(int, FILE *); int _thread_sys_putchar(int); int _thread_sys_puts(const char *); int _thread_sys_putw(int, FILE *); int _thread_sys_remove(const char *); int _thread_sys_rename (const char *, const char *); int _thread_sys_scanf(const char *, ...); int _thread_sys_setlinebuf(FILE *); int _thread_sys_setvbuf(FILE *, char *, int, size_t); int _thread_sys_snprintf(char *, size_t, const char *, ...); int _thread_sys_sprintf(char *, const char *, ...); int _thread_sys_sscanf(const char *, const char *, ...); int _thread_sys_ungetc(int, FILE *); int _thread_sys_vfprintf(FILE *, const char *, _BSD_VA_LIST_); int _thread_sys_vprintf(const char *, _BSD_VA_LIST_); int _thread_sys_vscanf(const char *, _BSD_VA_LIST_); int _thread_sys_vsnprintf(char *, size_t, const char *, _BSD_VA_LIST_); int _thread_sys_vsprintf(char *, const char *, _BSD_VA_LIST_); int _thread_sys_vsscanf(const char *, const char *, _BSD_VA_LIST_); long _thread_sys_ftell(FILE *); size_t 
_thread_sys_fread(void *, size_t, size_t, FILE *); size_t _thread_sys_fwrite(const void *, size_t, size_t, FILE *); void _thread_sys_clearerr(FILE *); void _thread_sys_perror(const char *); void _thread_sys_rewind(FILE *); void _thread_sys_setbuf(FILE *, char *); void _thread_sys_setbuffer(FILE *, char *, int); #endif /* #include */ #ifdef _UNISTD_H_ char *_thread_sys_ttyname(int); int _thread_sys_close(int); int _thread_sys_dup(int); int _thread_sys_dup2(int, int); int _thread_sys_exect(const char *, char * const *, char * const *); int _thread_sys_execve(const char *, char * const *, char * const *); int _thread_sys_fchdir(int); int _thread_sys_fchown(int, uid_t, gid_t); int _thread_sys_fsync(int); int _thread_sys_ftruncate(int, off_t); int _thread_sys_pause(void); int _thread_sys_pipe(int *); int _thread_sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); long _thread_sys_fpathconf(int, int); off_t _thread_sys_lseek(int, off_t, int); pid_t _thread_sys_fork(void); pid_t _thread_sys_tcgetpgrp(int); ssize_t _thread_sys_read(int, void *, size_t); ssize_t _thread_sys_write(int, const void *, size_t); void _thread_sys__exit(int); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int _thread_sys_creat(const char *, mode_t); int _thread_sys_fcntl(int, int, ...); int _thread_sys_flock(int, int); int _thread_sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int _thread_sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _DIRENT_H_ DIR *___thread_sys_opendir2(const char *, int); DIR *_thread_sys_opendir(const char *); int _thread_sys_alphasort(const void *, const void *); int _thread_sys_scandir(const char *, struct dirent ***, int (*)(struct dirent *), int (*)(const void *, const void *)); int _thread_sys_closedir(DIR *); int _thread_sys_getdirentries(int, char *, int, long *); long _thread_sys_telldir(const DIR *); struct dirent *_thread_sys_readdir(DIR *); void _thread_sys_rewinddir(DIR *); void _thread_sys_seekdir(DIR *, long); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t _thread_sys_readv(int, const struct iovec *, int); ssize_t _thread_sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t _thread_sys_wait(int *); pid_t _thread_sys_waitpid(pid_t, int *, int); pid_t _thread_sys_wait3(int *, int, struct rusage *); pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _SYS_POLL_H_ int _thread_sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int _thread_sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ Index: head/lib/libpthread/thread/thr_sig.c =================================================================== --- head/lib/libpthread/thread/thr_sig.c (revision 68515) +++ head/lib/libpthread/thread/thr_sig.c (revision 68516) @@ -1,1267 +1,1116 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(pthread_t pthread, int sig, int has_args); static void thread_sig_check_state(pthread_t pthread, int sig); static pthread_t thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp); -static void thread_sigframe_add(pthread_t thread, int sig); -static void thread_sigframe_leave(pthread_t thread, int frame); -static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf); +static void thread_sigframe_add(pthread_t thread, int sig, int has_args); static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf); /* #define DEBUG_SIGNAL */ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { - pthread_t pthread; - int current_frame; + pthread_t pthread, pthread_h; + void *stackp; + int in_sched = 0; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run); + if (_thread_kern_in_sched != 0) + in_sched = 1; + else { + stackp = (void *)GET_STACK_UC(ucp); + if ((stackp >= _thread_kern_sched_stack) && + (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE)) + in_sched = 1; + } /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; - if (_thread_kern_in_sched != 0) { + if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. 
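The new in_sched test above no longer trusts _thread_kern_in_sched alone: a signal is also treated as interrupting the scheduler whenever the interrupted stack pointer, pulled from the ucontext with GET_STACK_UC(), falls inside the dedicated scheduler stack of SCHED_STACK_SIZE bytes. The range check on its own (mirroring the inclusive upper bound, base + size, used above):

#include <stddef.h>

/* Does the interrupted stack pointer lie on the scheduler's stack? */
static int
on_sched_stack(const void *sp, const void *stack_base, size_t stack_size)
{
	const char *p = sp;
	const char *base = stack_base;

	return (p >= base && p <= base + stack_size);
}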
*/ else if (_thread_run->sig_defer_count > 0) _thread_run->yield_on_sig_undefer = 1; else { /* * Save the context of the currently running thread: */ thread_sig_savecontext(_thread_run, ucp); /* * Schedule the next thread. This function is not * expected to return because it will do a longjmp - * instead. + * instead. */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the - * process: + * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ - else if ((_thread_kern_in_sched != 0) || - (_thread_run->sig_defer_count > 0)) { + else if ((in_sched != 0) || (_thread_run->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { _thread_sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { - /* The signal is not blocked; handle the signal: */ - current_frame = _thread_run->sigframe_count; - /* + * The signal is not blocked; handle the signal. + * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); + pthread_h = NULL; if ((pthread = thread_sig_find(sig)) != NULL) { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* * A thread was found that can handle the signal. * Save the context of the currently running thread * so that we can switch to another thread without * losing track of where the current thread left off. * This also applies if the current thread is the * thread to be signaled. 
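When the handler cannot act immediately (scheduler active, or the running thread has signals deferred) it records the signal in _thread_sigq and, if _queue_signals is set, also writes the signal number down the kernel pipe so the scheduler's select() wakes up promptly: the classic self-pipe trick. Both halves sketched with hypothetical names (the drain side assumes the read end was set non-blocking):

#include <unistd.h>

/* Handler side: a single write(2) is async-signal-safe. */
static void
post_signal_byte(int wr_fd, int sig)
{
	char c = (char)sig;

	(void)write(wr_fd, &c, 1);
}

/* Scheduler side: drain whatever arrived while we were busy. */
static int
drain_signal_bytes(int rd_fd)
{
	char	c;
	int	n = 0;

	while (read(rd_fd, &c, 1) == 1)
		n++;		/* each byte is one queued signal number */
	return (n);
}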
*/ thread_sig_savecontext(_thread_run, ucp); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ - pthread = PTHREAD_PRIOQ_FIRST(); + pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", - pthread); + pthread_h); } else DBG_MSG("No thread to handle signal %d\n", sig); SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. */ - if ((_thread_run->sigframe_count > current_frame) || - ((pthread != NULL) && - (pthread->active_priority > _thread_run->active_priority))) { + if ((pthread == _thread_run) || ((pthread_h != NULL) && + (pthread_h->active_priority > _thread_run->active_priority))) { /* Enter the kernel scheduler: */ - DBG_MSG("Entering scheduler from signal handler\n"); _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp) { - struct pthread_signal_frame *psf; + memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp)); - psf = _thread_run->curframe; - - memcpy(&psf->ctx.uc, ucp, sizeof(*ucp)); - /* XXX - Save FP registers too? */ - FP_SAVE_UC(&psf->ctx.uc); + FP_SAVE_UC(&pthread->ctx.uc); /* Mark the context saved as a ucontext: */ - psf->ctxtype = CTX_UC; + pthread->ctxtype = CTX_UC; } /* * Find a thread that can handle the signal. */ pthread_t thread_sig_find(int sig) { int handler_installed; pthread_t pthread, pthread_next; pthread_t suspended_thread, signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ - if (sig == SIGINFO) + if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); + /* Unblock this signal to allow further dumps: */ + _thread_sigq[sig - 1].blocked = 0; + } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((_thread_run != &_thread_kern_thread) && !sigismember(&_thread_run->sigmask, sig)) signaled_thread = _thread_run; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags.
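The search in thread_sig_find() amounts to a fixed preference order: a sigwait()er on this signal wins outright (and short-circuits delivery), then a thread blocked in sigsuspend(), then the currently running thread, then the first thread found with the signal unmasked; with no custom handler installed, only the sigwait case matters. A distilled chooser over that ranking (illustrative only, not the function's actual two-queue control flow):

enum sig_pref {
	PREF_NONE = 0,
	PREF_UNMASKED,		/* any thread with the signal unmasked */
	PREF_CURRENT,		/* the currently running thread */
	PREF_SIGSUSPEND,	/* blocked in sigsuspend() */
	PREF_SIGWAIT		/* in sigwait() on this very signal */
};

struct cand {
	struct cand	*next;
	enum sig_pref	rank;
};

/* Pick the best-ranked candidate; the first hit wins on ties. */
static struct cand *
pick_target(struct cand *list)
{
	struct cand *best, *c;

	best = NULL;
	for (c = list; c != NULL; c = c->next)
		if (best == NULL || c->rank > best->rank)
			best = c;
	return (best);
}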
- */ + */ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && !sigismember(&pthread->sigmask, sig)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads - * capable of receiving the signal: + * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. */ return (NULL); } void _thread_sig_check_pending(pthread_t pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { pthread_t pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { pthread_t pthread, pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh.
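_thread_sig_check_pending() above computes the deliverable set as (thread-pending OR process-pending) AND NOT thread-mask, via the library's private SIGSETOR/SIGSETNAND macros over sigset_t. Modelling the sets as plain bit masks (bit n-1 standing for signal n) makes the arithmetic explicit:

/* Deliverable = everything pending that the thread has not masked. */
static unsigned int
deliverable_set(unsigned int thr_pending, unsigned int proc_pending,
    unsigned int thr_mask)
{
	return ((thr_pending | proc_pending) & ~thr_mask);
}

/* Walk the result lowest-signal-first, as the 1..NSIG-1 loop does. */
static int
next_deliverable(unsigned int set)
{
	int sig;

	for (sig = 1; sig <= 32; sig++)
		if (set & (1u << (sig - 1)))
			return (sig);
	return (0);	/* nothing deliverable */
}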
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ _thread_sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(pthread_t pthread, int sig, int has_args) { - int restart, frame; - int block_signals = 0; + int restart; int suppress_handler = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. */ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ - case PS_COND_WAIT: case PS_JOIN: + /* Only set the interrupted flag for PS_JOIN: */ + pthread->interrupted = 1; + /* FALLTHROUGH */ + case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); - /* - * To ensure the thread is removed from the fd and file - * queues before any other signal interrupts it, set the - * signal mask to block all signals. As soon as the thread - * is removed from the queue the signal mask will be - * restored. - */ - block_signals = 1; break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { + /* Set up a signal frame and save the current thread's state: */ + thread_sigframe_add(pthread, sig, has_args); + /* - * Save the current state of the thread and add a - * new signal frame. + * Signals are deferred until just before the thread's + * signal handler is invoked: */ - frame = pthread->sigframe_count; - thread_sigframe_save(pthread, pthread->curframe); - thread_sigframe_add(pthread, sig); - pthread->sigframes[frame + 1]->sig_has_args = has_args; - SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask); - if (block_signals != 0) { - /* Save the signal mask and block all signals: */ - pthread->sigframes[frame + 1]->saved_state.psd_sigmask = - pthread->sigmask; - sigfillset(&pthread->sigmask); - } - + pthread->sig_defer_count = 1; + /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. Raise the priority and place * the thread in the run queue.
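PTHREAD_SIGNAL_PRIORITY can simply be OR-ed in here (and masked back out by the wrapper) because it is a single bit above the range of ordinary priorities. An illustrative program with made-up constants (the real values live in pthread_private.h):

    #include <stdio.h>

    /* Illustrative value only; not the library's actual constant: */
    #define SIGNAL_PRIORITY 0x40    /* above any normal priority */

    int
    main(void)
    {
        int prio = 17;                  /* some ordinary active priority */

        prio |= SIGNAL_PRIORITY;        /* boost for signal delivery */
        printf("boosted:  %d\n", prio); /* 81 */

        prio &= ~SIGNAL_PRIORITY;       /* restore before the handler runs */
        printf("restored: %d\n", prio); /* 17 */
        return (0);
    }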
*/ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; if (pthread != _thread_run) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(pthread_t pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(pthread_t pthread, int sig) { /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (pthread == _thread_run) { /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); if (!sigismember(&pthread->sigmask, sig)) { /* * Call the kernel scheduler which will safely * install a signal frame for this thread: */ _thread_kern_sched_sig(); } } else if (!sigismember(&pthread->sigmask, sig)) { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } else { /* Increment the pending signal count. */ sigaddset(&pthread->sigpend,sig); } } } /* * User thread signal handler wrapper. 
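_thread_sig_send() above is the engine behind pthread_kill(), and its branches reduce to a four-way classification. A hedged sketch of that ladder (classify_send() and its enum are illustrative, not library API):

    #include <signal.h>

    enum sig_disposition {
        SIG_TO_PROCESS,     /* default action: kill(getpid(), sig) */
        SIG_DROPPED,        /* explicitly ignored */
        SIG_LEFT_PENDING,   /* blocked by the target thread */
        SIG_DELIVER_NOW     /* build a signal frame for the thread */
    };

    static enum sig_disposition
    classify_send(const struct sigaction *act, const sigset_t *mask, int sig)
    {
        if (act->sa_handler == SIG_DFL)
            return (SIG_TO_PROCESS);
        if (act->sa_handler == SIG_IGN)
            return (SIG_DROPPED);
        if (sigismember(mask, sig))
            return (SIG_LEFT_PENDING);  /* sigaddset(&pend, sig) */
        return (SIG_DELIVER_NOW);
    }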
* * thread - current running thread */ void _thread_sig_wrapper(void) { void (*sigfunc)(int, siginfo_t *, void *); struct pthread_signal_frame *psf; - pthread_t thread; - int dead = 0; - int i, sig, has_args; - int frame, dst_frame; + pthread_t thread; thread = _thread_run; /* Get the current frame and state: */ - frame = thread->sigframe_count; - PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler"); psf = thread->curframe; + thread->curframe = NULL; + PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); - /* Check the threads previous state: */ + /* Check the thread's previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. */ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; case PS_FILE_WAIT: _flockfile_backout(thread); psf->saved_state.psd_state = PS_RUNNING; - /* Reenable signals: */ - thread->sigmask = psf->saved_state.psd_sigmask; break; + case PS_COND_WAIT: + _cond_wait_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_JOIN: + _join_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + + case PS_MUTEX_WAIT: + _mutex_lock_backout(thread); + psf->saved_state.psd_state = PS_RUNNING; + break; + default: break; } } + /* Unblock the signal in case we don't return from the handler: */ + _thread_sigq[psf->signo - 1].blocked = 0; + /* - * Unless the thread exits or longjmps out of the signal handler, - * return to the previous frame: + * Lower the priority before calling the handler in case + * it never returns (longjmps back): */ - dst_frame = frame - 1; + thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* + * Reenable interruptions without checking for the need to + * context switch: + */ + thread->sig_defer_count = 0; + + /* * Check that a custom handler is installed and if the signal * is not blocked: */ sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { + DBG_MSG("_thread_sig_wrapper: Calling signal handler for " + "thread 0x%p\n", thread); /* - * The signal jump buffer is allocated off the stack. - * If the signal handler tries to [_][sig]longjmp() or - * setcontext(), our wrapped versions of these routines - * will copy the user supplied jump buffer or context - * to the destination signal frame, set the destination - * signal frame in psf->dst_frame, and _longjmp() back - * to here. + * Dispatch the signal via the custom signal + * handler: */ - jmp_buf jb; - - /* - * Set up the context for abnormal returns out of signal - * handlers. - */ - psf->sig_jb = &jb; - if (_setjmp(jb) == 0) { - DBG_MSG("_thread_sig_wrapper: Entering frame %d, " - "stack 0x%lx\n", frame, GET_STACK_JB(jb)); - /* - * Invalidate the destination frame before calling - * the signal handler.
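The dispatch the new wrapper performs (shown just below) must honor three handler conventions: handlers that want no arguments, SA_SIGINFO handlers, and old-style handlers that receive the code where the siginfo pointer goes. A standalone sketch of the same three-way switch, with dispatch() as a hypothetical helper:

    #include <signal.h>
    #include <stddef.h>

    /*
     * Hedged sketch of the wrapper's dispatch.  "act" mirrors one slot
     * of _thread_sigact[]; info/uc play the role of the arguments saved
     * in the signal frame.
     */
    static void
    dispatch(const struct sigaction *act, int sig, siginfo_t *info,
        void *uc, int has_args)
    {
        void (*sigfunc)(int, siginfo_t *, void *) = act->sa_sigaction;

        if (has_args == 0)
            (*sigfunc)(sig, NULL, NULL);
        else if (act->sa_flags & SA_SIGINFO)
            (*sigfunc)(sig, info, uc);
        else
            /* Old convention: the code rides in the siginfo slot. */
            (*sigfunc)(sig, (siginfo_t *)(long)info->si_code, uc);
    }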
- */ - psf->dst_frame = -1; - - /* - * Dispatch the signal via the custom signal - * handler: - */ - if (psf->sig_has_args == 0) - (*(sigfunc))(psf->signo, NULL, NULL); - else if ((_thread_sigact[psf->signo - 1].sa_flags & - SA_SIGINFO) != 0) - (*(sigfunc))(psf->signo, - &_thread_sigq[psf->signo - 1].siginfo, - &_thread_sigq[psf->signo - 1].uc); - else - (*(sigfunc))(psf->signo, - (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code, - &_thread_sigq[psf->signo - 1].uc); - } - else { - /* - * The return from _setjmp() should only be non-zero - * when the signal handler wants to xxxlongjmp() or - * setcontext() to a different context, or if the - * thread has exited (via pthread_exit). - */ - /* - * Grab a copy of the destination frame before it - * gets clobbered after unwinding. - */ - dst_frame = psf->dst_frame; - DBG_MSG("Abnormal exit from handler for signal %d, " - "frame %d\n", psf->signo, frame); - - /* Has the thread exited? */ - if ((dead = thread->flags & PTHREAD_EXITING) != 0) - /* When exiting, unwind to frame 0. */ - dst_frame = 0; - else if ((dst_frame < 0) || (dst_frame > frame)) - PANIC("Attempt to unwind to invalid " - "signal frame"); - - /* Unwind to the target frame: */ - for (i = frame; i > dst_frame; i--) { - DBG_MSG("Leaving frame %d, signal %d\n", i, - thread->sigframes[i]->signo); - /* Leave the current signal frame: */ - thread_sigframe_leave(thread, i); - - /* - * Save whatever is needed out of the state - * data; as soon as the frame count is - * is decremented, another signal can arrive - * and corrupt this view of the state data. - */ - sig = thread->sigframes[i]->signo; - has_args = thread->sigframes[i]->sig_has_args; - - /* - * We're done with this signal frame: - */ - thread->curframe = thread->sigframes[i - 1]; - thread->sigframe_count = i - 1; - - /* - * Only unblock the signal if it was a - * process signal as opposed to a signal - * generated by pthread_kill(). - */ - if (has_args != 0) - _thread_sigq[sig - 1].blocked = 0; - } - } + if (psf->sig_has_args == 0) + (*(sigfunc))(psf->signo, NULL, NULL); + else if ((_thread_sigact[psf->signo - 1].sa_flags & + SA_SIGINFO) != 0) + (*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc); + else + (*(sigfunc))(psf->signo, + (siginfo_t *)psf->siginfo.si_code, &psf->uc); } - /* - * Call the kernel scheduler to schedule the next - * thread. + * Call the kernel scheduler to safely restore the frame and + * schedule the next thread: */ - if (dead == 0) { - /* Restore the threads state: */ - thread_sigframe_restore(thread, thread->sigframes[dst_frame]); - _thread_kern_sched_frame(dst_frame); - } - else { - PTHREAD_ASSERT(dst_frame == 0, - "Invalid signal frame for dead thread"); - - /* Perform any necessary cleanup before exiting. 
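Most of what is deleted above existed to catch handlers that never return: the old wrapper ran the user handler under _setjmp(), so a handler that longjmp()ed or exited bounced back with a non-zero value and frames were unwound by hand. Stripped of the frame bookkeeping, the trick looks like this (jumpy_handler is illustrative):

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf escape;

    /* An illustrative handler that refuses to return normally: */
    static void
    jumpy_handler(int sig)
    {
        longjmp(escape, sig);
    }

    int
    main(void)
    {
        if (setjmp(escape) == 0) {
            jumpy_handler(15);  /* stand-in for the real dispatch */
            printf("handler returned normally\n");
        } else {
            /* The old wrapper unwound signal frames from here. */
            printf("handler escaped via longjmp\n");
        }
        return (0);
    }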
*/ - thread_sigframe_leave(thread, 0); - - /* This should never return: */ - _thread_exit_finish(); - PANIC("Return from _thread_exit_finish in signal wrapper"); - } + _thread_kern_sched_frame(psf); } static void -thread_sigframe_add(pthread_t thread, int sig) +thread_sigframe_add(pthread_t thread, int sig, int has_args) { + struct pthread_signal_frame *psf = NULL; unsigned long stackp = 0; /* Get the top of the threads stack: */ - switch (thread->curframe->ctxtype) { + switch (thread->ctxtype) { case CTX_JB: case CTX_JB_NOSIG: - stackp = GET_STACK_JB(thread->curframe->ctx.jb); + stackp = GET_STACK_JB(thread->ctx.jb); break; case CTX_SJB: - stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb); + stackp = GET_STACK_SJB(thread->ctx.sigjb); break; case CTX_UC: - stackp = GET_STACK_UC(&thread->curframe->ctx.uc); + stackp = GET_STACK_UC(&thread->ctx.uc); break; default: PANIC("Invalid thread context type"); break; } /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); - /* Set up the new frame: */ - thread->sigframe_count++; - thread->sigframes[thread->sigframe_count] = - (struct pthread_signal_frame *) stackp; - thread->curframe = thread->sigframes[thread->sigframe_count]; - thread->curframe->stackp = stackp; - thread->curframe->ctxtype = CTX_JB_NOSIG; - thread->curframe->longjmp_val = 1; - thread->curframe->signo = sig; + psf = (struct pthread_signal_frame *) stackp; - /* - * Set up the context: - */ - _setjmp(thread->curframe->ctx.jb); - SET_STACK_JB(thread->curframe->ctx.jb, stackp); - SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper); -} + /* Save the current context in the signal frame: */ + thread_sigframe_save(thread, psf); -/* - * Locate the signal frame from the specified stack pointer. - */ -int -_thread_sigframe_find(pthread_t pthread, void *stackp) -{ - int frame; + /* Set handler specific information: */ + psf->sig_has_args = has_args; + psf->signo = sig; + if (has_args) { + /* Copy the signal handler arguments to the signal frame: */ + memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, + sizeof(psf->uc)); + memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, + sizeof(psf->siginfo)); + } + /* Set up the new frame: */ + thread->curframe = psf; + thread->ctxtype = CTX_JB_NOSIG; + thread->longjmp_val = 1; + thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | + PTHREAD_FLAGS_IN_SYNCQ; /* - * Find the destination of the target frame based on the - * given stack pointer. 
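thread_sigframe_add() above carves the new frame straight out of the interrupted thread's stack: step below the stack pointer, align down, then reserve room for the frame. The pointer arithmetic in isolation, with struct frame as a stub for struct pthread_signal_frame and a made-up stack address:

    #include <stdio.h>

    struct frame {          /* stub for struct pthread_signal_frame */
        char body[172];
    };

    int
    main(void)
    {
        unsigned long stackp = 0xbfbfe00aUL;  /* pretend stack pointer */
        struct frame *psf;

        stackp -= sizeof(double);       /* leave a little scratch space */
        stackp &= ~0x3UL;               /* round down to a word boundary */
        stackp -= sizeof(struct frame); /* reserve room for the frame */
        psf = (struct frame *)stackp;

        printf("frame at %#lx (%p)\n", stackp, (void *)psf);
        return (0);
    }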
+ * Set up the context: */ - for (frame = pthread->sigframe_count; frame >= 0; frame--) { - if (stackp < (void *)pthread->sigframes[frame]->stackp) - break; - } - return (frame); + stackp += sizeof(double); + _setjmp(thread->ctx.jb); + SET_STACK_JB(thread->ctx.jb, stackp); + SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } - + void -thread_sigframe_leave(pthread_t thread, int frame) +_thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) { - struct pthread_state_data *psd; - - psd = &thread->sigframes[frame]->saved_state; - + thread->ctxtype = psf->ctxtype; + memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc)); /* - * Perform any necessary cleanup for this signal frame: + * Only restore the signal mask if it hasn't been changed + * by the application during invocation of the signal handler: */ - switch (psd->psd_state) { - case PS_DEAD: - case PS_DEADLOCK: - case PS_RUNNING: - case PS_SIGTHREAD: - case PS_STATE_MAX: - case PS_SUSPENDED: - break; - - /* - * Threads in the following states need to be removed - * from queues. - */ - case PS_COND_WAIT: - _cond_wait_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - _fd_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FILE_WAIT: - _flockfile_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_JOIN: - _join_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_MUTEX_WAIT: - _mutex_lock_backout(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) - PTHREAD_WAITQ_REMOVE(thread); - break; - - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - case PS_SLEEP_WAIT: - case PS_SPINBLOCK: - case PS_WAIT_WAIT: - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) { - PTHREAD_WAITQ_REMOVE(thread); - if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - } - break; - } -} - -static void -thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf) -{ - thread->interrupted = psf->saved_state.psd_interrupted; - thread->sigmask = psf->saved_state.psd_sigmask; - thread->state = psf->saved_state.psd_state; - thread->flags = psf->saved_state.psd_flags; + if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) + thread->sigmask = psf->saved_state.psd_sigmask; + thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; + thread->state = psf->saved_state.psd_state; + thread->flags = psf->saved_state.psd_flags; + thread->interrupted = psf->saved_state.psd_interrupted; + thread->longjmp_val = psf->saved_state.psd_longjmp_val; + thread->signo = psf->saved_state.psd_signo; + thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf) { - psf->saved_state.psd_interrupted = thread->interrupted; + psf->ctxtype = thread->ctxtype; + memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc)); psf->saved_state.psd_sigmask = thread->sigmask; - psf->saved_state.psd_state = thread->state; - psf->saved_state.psd_flags = thread->flags; - thread->flags &= PTHREAD_FLAGS_PRIVATE | 
PTHREAD_FLAGS_TRACE | - PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | - PTHREAD_FLAGS_IN_JOINQ; + psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; + psf->saved_state.psd_state = thread->state; + psf->saved_state.psd_flags = thread->flags & + (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); + psf->saved_state.psd_interrupted = thread->interrupted; + psf->saved_state.psd_longjmp_val = thread->longjmp_val; + psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; + psf->saved_state.psd_signo = thread->signo; + psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } #endif Index: head/lib/libpthread/thread/thr_sigaction.c =================================================================== --- head/lib/libpthread/thread/thr_sigaction.c (revision 68515) +++ head/lib/libpthread/thread/thr_sigaction.c (revision 68516) @@ -1,111 +1,111 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
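Before leaving uthread_sig.c: the save/restore pair above is a straight field snapshot, except that the signal mask is guarded by sigmask_seqno so that a mask the application changed inside a handler is not clobbered when the frame is popped. The pattern in miniature (struct thr and struct saved are hypothetical reductions):

    #include <signal.h>

    /* Hypothetical reductions of the thread and saved-state structs: */
    struct saved { sigset_t mask; unsigned seqno; };
    struct thr   { sigset_t mask; unsigned seqno; };

    static void
    save(const struct thr *t, struct saved *s)
    {
        s->mask = t->mask;
        s->seqno = t->seqno;    /* remember which mask we saved */
    }

    static void
    restore(struct thr *t, const struct saved *s)
    {
        /*
         * Only roll the mask back if pthread_sigmask() was not called
         * while the frame was live; each call bumps t->seqno.
         */
        if (t->seqno == s->seqno)
            t->mask = s->mask;
    }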
* * $FreeBSD$ */ #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int _sigaction(int sig, const struct sigaction * act, struct sigaction * oact) { int ret = 0; struct sigaction gact; /* Check if the signal number is out of range: */ if (sig < 1 || sig > NSIG) { /* Return an invalid argument: */ errno = EINVAL; ret = -1; } else { /* * Check if the existing signal action structure contents are * to be returned: */ if (oact != NULL) { /* Return the existing signal action contents: */ oact->sa_handler = _thread_sigact[sig - 1].sa_handler; oact->sa_mask = _thread_sigact[sig - 1].sa_mask; oact->sa_flags = _thread_sigact[sig - 1].sa_flags; } /* Check if a signal action was supplied: */ if (act != NULL) { /* Set the new signal handler: */ _thread_sigact[sig - 1].sa_mask = act->sa_mask; _thread_sigact[sig - 1].sa_flags = act->sa_flags; _thread_sigact[sig - 1].sa_handler = act->sa_handler; } /* * Check if the kernel needs to be advised of a change * in signal action: */ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD && sig != SIGINFO) { /* * Ensure the signal handler cannot be interrupted * by other signals. Always request the POSIX signal * handler arguments. */ sigfillset(&gact.sa_mask); - gact.sa_flags = SA_SIGINFO; + gact.sa_flags = SA_SIGINFO | SA_ONSTACK; /* * Check if the signal handler is being set to * the default or ignore handlers: */ if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN) /* Specify the built in handler: */ gact.sa_handler = act->sa_handler; else /* * Specify the thread kernel signal * handler: */ gact.sa_handler = (void (*) ()) _thread_sig_handler; /* Change the signal action in the kernel: */ if (_thread_sys_sigaction(sig,&gact,NULL) != 0) ret = -1; } } /* Return the completion status: */ return (ret); } __strong_reference(_sigaction, sigaction); #endif Index: head/lib/libpthread/thread/thr_sigmask.c =================================================================== --- head/lib/libpthread/thread/thr_sigmask.c (revision 68515) +++ head/lib/libpthread/thread/thr_sigmask.c (revision 68516) @@ -1,102 +1,105 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
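Stepping back to _sigaction() above: the wrapper records the application's disposition in _thread_sigact[] but always installs the library's own entry point with a fully blocked mask, so every interesting signal funnels through the thread kernel. A hedged interposition skeleton (user_act, my_dispatcher, and interpose_sigaction are illustrative names, not the library's):

    #include <signal.h>
    #include <string.h>

    static struct sigaction user_act[NSIG]; /* stand-in for _thread_sigact */

    static void
    my_dispatcher(int sig, siginfo_t *info, void *uc)
    {
        /* The real library builds a signal frame here. */
        (void)sig; (void)info; (void)uc;
    }

    /* Record the user's wishes; give the kernel our own handler. */
    static int
    interpose_sigaction(int sig, const struct sigaction *act)
    {
        struct sigaction gact;

        user_act[sig - 1] = *act;   /* what the application asked for */

        memset(&gact, 0, sizeof(gact));
        sigfillset(&gact.sa_mask);  /* run un-preempted by other signals */
        if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN)
            gact.sa_handler = act->sa_handler;
        else {
            gact.sa_flags = SA_SIGINFO; /* always want full arguments */
            gact.sa_sigaction = my_dispatcher;
        }
        return (sigaction(sig, &gact, NULL));
    }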
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #ifdef _THREAD_SAFE #include #include "pthread_private.h" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset) { sigset_t sigset; int ret = 0; /* Check if the existing signal process mask is to be returned: */ if (oset != NULL) { /* Return the current mask: */ *oset = _thread_run->sigmask; } /* Check if a new signal set was provided by the caller: */ if (set != NULL) { /* Process according to what to do: */ switch (how) { /* Block signals: */ case SIG_BLOCK: /* Add signals to the existing mask: */ SIGSETOR(_thread_run->sigmask, *set); break; /* Unblock signals: */ case SIG_UNBLOCK: /* Clear signals from the existing mask: */ SIGSETNAND(_thread_run->sigmask, *set); break; /* Set the signal process mask: */ case SIG_SETMASK: /* Set the new mask: */ _thread_run->sigmask = *set; break; /* Trap invalid actions: */ default: /* Return an invalid argument: */ errno = EINVAL; ret = -1; break; } + /* Increment the sequence number: */ + _thread_run->sigmask_seqno++; + /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = _thread_run->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, _thread_run->sigmask); if (SIGNOTEMPTY(sigset)) /* * Call the kernel scheduler which will safely * install a signal frame for the running thread: */ _thread_kern_sched_sig(); } /* Return the completion status: */ return (ret); } #endif
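The three `how` operations implemented above are plain set algebra on the calling thread's mask (the library spells them SIGSETOR and SIGSETNAND). The same semantics in a standalone sketch, using only the portable sigset_t operations:

    #include <errno.h>
    #include <signal.h>

    /* Apply pthread_sigmask()-style set algebra to "mask" in place: */
    static int
    apply_how(sigset_t *mask, int how, const sigset_t *set)
    {
        int i;

        switch (how) {
        case SIG_BLOCK:             /* union: add the new signals */
            for (i = 1; i < NSIG; i++)
                if (sigismember(set, i))
                    sigaddset(mask, i);
            return (0);
        case SIG_UNBLOCK:           /* difference: clear them */
            for (i = 1; i < NSIG; i++)
                if (sigismember(set, i))
                    sigdelset(mask, i);
            return (0);
        case SIG_SETMASK:           /* replace outright */
            *mask = *set;
            return (0);
        default:
            errno = EINVAL;
            return (-1);
        }
    }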