diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h index 04023fb81a35..3fef49c7b25f 100644 --- a/lib/libc_r/uthread/pthread_private.h +++ b/lib/libc_r/uthread/pthread_private.h @@ -1,1362 +1,1339 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. 
* * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. */ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (long)(ra); \ (jb)[0]._jb[R_RA + 4] = (long)(ra); \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) 
#else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ - PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 -/* - * Additional state for a thread suspended with pthread_suspend_np(). - */ -enum pthread_susp { - SUSP_NO, /* Not suspended. */ - SUSP_YES, /* Suspended. */ - SUSP_JOIN, /* Suspended, joining. 
*/ - SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ - SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ - SUSP_COND_WAIT /* Suspended, still in a condition queue. */ -}; - /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of default red zone at the end of each stack. In actuality, this "red * zone" is merely an unmapped region, except in the case of the initial stack. * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK * region, an unmapped gap between thread stacks achieves the same effect as * explicitly mapped red zones. * This is declared and initialized in uthread_init.c. */ extern int _pthread_guard_default; extern int _pthread_page_size; /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 #define CLOCK_RES_USEC_MIN 1000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. 
*/ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); struct pthread_signal_frame; struct pthread_state_data { struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; enum pthread_state psd_state; int psd_flags; int psd_interrupted; int psd_longjmp_val; int psd_sigmask_seqno; int psd_signo; int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; struct join_status { struct pthread *thread; void *ret; int error; }; /* * The frame that is added to the top of a threads stack when setting up * up the thread to run a signal handler. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; /* * Threads return context; we use only jmp_buf's for now. 
*/ union { jmp_buf jb; ucontext_t uc; } ctx; int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ ucontext_t uc; siginfo_t siginfo; }; struct pthread_specific_elem { const void *data; int seqno; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* * Threads return context; we use only jmp_buf's for now. */ union { jmp_buf jb; ucontext_t uc; } ctx; /* * Used for tracking delivery of signal handlers. */ struct pthread_signal_frame *curframe; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; - enum pthread_susp suspended; - thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. 
*/ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * A thread can also be joining a thread (the joiner field above). * * It must not be possible for a thread to belong to any of the * above queues while it is handling a signal. Signal handlers * may longjmp back to previous stack frames circumventing normal * control flow. This could corrupt queue integrity if the thread * retains membership in the queue. Therefore, if a thread is a * member of one of these queues when a signal handler is invoked, * it must remove itself from the queue before calling the signal * handler and reinsert itself after normal return of the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex and condition variable) queue * links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. 
*/ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ -#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */ +#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */ +#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. 
*/ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Global variables for the uthread kernel. */ SCLASS void *_usrstack #ifdef GLOBAL_PTHREAD_PRIVATE = (void *) USRSTACK; #else ; #endif /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif -/* - * Ptr to the thread running in single-threaded mode or NULL if - * running multi-threaded (default POSIX behaviour). - */ -SCLASS struct pthread * volatile _thread_single -#ifdef GLOBAL_PTHREAD_PRIVATE -= NULL; -#else -; -#endif - /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. 
*/ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT, -1 }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. 
*/ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. */ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; SCLASS sigset_t _process_sigmask #ifdef GLOBAL_PTHREAD_PRIVATE = { {0, 0, 0, 0} } #endif ; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. 
*/ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_thread(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void *_thread_stack_alloc(size_t, size_t); void _thread_stack_free(void *, size_t, size_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if 
defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(struct pthread *pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(struct pthread *pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_ACL_H int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *); int __sys___acl_delete_fd(int, acl_type_t); int __sys___acl_get_fd(int, acl_type_t, struct acl *); int __sys___acl_set_fd(int, acl_type_t, struct acl *); #endif /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_CAPABILITY_H int __sys___cap_get_fd(int, struct cap *); int 
__sys___cap_set_fd(int, struct cap *); #endif /* #include */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, socklen_t *); int __sys_bind(int, const struct sockaddr *, socklen_t); int __sys_connect(int, const struct sockaddr *, socklen_t); int __sys_getpeername(int, struct sockaddr *, socklen_t *); int __sys_getsockname(int, struct sockaddr *, socklen_t *); int __sys_getsockopt(int, int, int, void *, socklen_t *); int __sys_listen(int, int); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *); ssize_t __sys_recvmsg(int, struct msghdr *, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t); int __sys_setsockopt(int, int, int, const void *, socklen_t); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchflags(int, u_long); int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* 
#include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigreturn(ucontext_t *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); void __sys_exit(int); int __sys_fchown(int, uid_t, gid_t); pid_t __sys_fork(void); long __sys_fpathconf(int, int); int __sys_fsync(int); int __sys_pipe(int *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ diff --git a/lib/libc_r/uthread/uthread_cancel.c b/lib/libc_r/uthread/uthread_cancel.c index b6b070f0549d..d9324abf01aa 100644 --- a/lib/libc_r/uthread/uthread_cancel.c +++ b/lib/libc_r/uthread/uthread_cancel.c @@ -1,245 +1,231 @@ /* * David Leonard , 1999. Public domain. 
* $FreeBSD$ */ #include #include #include "pthread_private.h" static void finish_cancellation(void *arg); __weak_reference(_pthread_cancel, pthread_cancel); __weak_reference(_pthread_setcancelstate, pthread_setcancelstate); __weak_reference(_pthread_setcanceltype, pthread_setcanceltype); __weak_reference(_pthread_testcancel, pthread_testcancel); int _pthread_cancel(pthread_t pthread) { int ret; if ((ret = _find_thread(pthread)) != 0) { /* NOTHING */ } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK || (pthread->flags & PTHREAD_EXITING) != 0) { ret = 0; } else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) || (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) && ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0))) /* Just mark it for cancellation: */ pthread->cancelflags |= PTHREAD_CANCELLING; else { /* * Check if we need to kick it back into the * run queue: */ switch (pthread->state) { case PS_RUNNING: /* No need to resume: */ pthread->cancelflags |= PTHREAD_CANCELLING; break; case PS_SPINBLOCK: case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Remove these threads from the work queue: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); /* Fall through: */ case PS_SIGTHREAD: case PS_SLEEP_WAIT: case PS_WAIT_WAIT: case PS_SIGSUSPEND: case PS_SIGWAIT: /* Interrupt and resume: */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread,PS_RUNNING); break; case PS_JOIN: /* * Disconnect the thread from the joinee: */ if (pthread->join_status.thread != NULL) { pthread->join_status.thread->joiner = NULL; pthread->join_status.thread = NULL; } pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread, PS_RUNNING); break; case PS_SUSPENDED: - if (pthread->suspended == SUSP_NO || - pthread->suspended == SUSP_YES || - pthread->suspended == SUSP_JOIN || - 
pthread->suspended == SUSP_NOWAIT) { - /* - * This thread isn't in any scheduling - * queues; just change it's state: - */ - pthread->cancelflags |= - PTHREAD_CANCELLING; - PTHREAD_SET_STATE(pthread, PS_RUNNING); - break; - } - /* FALLTHROUGH */ case PS_MUTEX_WAIT: case PS_COND_WAIT: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: /* * Threads in these states may be in queues. * In order to preserve queue integrity, the * cancelled thread must remove itself from the * queue. Mark the thread as interrupted and * needing cancellation, and set the state to * running. When the thread resumes, it will * remove itself from the queue and call the * cancellation completion routine. */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCEL_NEEDED; - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_NEW_STATE(pthread, PS_RUNNING); pthread->continuation = finish_cancellation; break; case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: /* Ignore - only here to silence -Wall: */ break; } } /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); ret = 0; } return (ret); } int _pthread_setcancelstate(int state, int *oldstate) { struct pthread *curthread = _get_curthread(); int ostate; int ret; ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE; switch (state) { case PTHREAD_CANCEL_ENABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE; if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0) pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DISABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags |= PTHREAD_CANCEL_DISABLE; ret = 0; break; default: ret = EINVAL; } return (ret); } int _pthread_setcanceltype(int type, int *oldtype) { struct pthread *curthread = _get_curthread(); int otype; int ret; otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS; switch (type) { case PTHREAD_CANCEL_ASYNCHRONOUS: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags |= 
PTHREAD_CANCEL_ASYNCHRONOUS; pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DEFERRED: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS; ret = 0; break; default: ret = EINVAL; } return (ret); } void _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) && ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) && ((curthread->flags & PTHREAD_EXITING) == 0)) { /* * It is possible for this thread to be swapped out * while performing cancellation; do not allow it * to be cancelled again. */ curthread->cancelflags &= ~PTHREAD_CANCELLING; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); PANIC("cancel"); } } void _thread_enter_cancellation_point(void) { struct pthread *curthread = _get_curthread(); /* Look for a cancellation before we block: */ pthread_testcancel(); curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT; } void _thread_leave_cancellation_point(void) { struct pthread *curthread = _get_curthread(); curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT; /* Look for a cancellation after we unblock: */ pthread_testcancel(); } static void finish_cancellation(void *arg) { struct pthread *curthread = _get_curthread(); curthread->continuation = NULL; curthread->interrupted = 0; if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); } } diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c index 7f3fe7acb2dd..cb45725531d0 100644 --- a/lib/libc_r/uthread/uthread_cond.c +++ b/lib/libc_r/uthread/uthread_cond.c @@ -1,747 +1,735 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); __weak_reference(_pthread_cond_init, pthread_cond_init); __weak_reference(_pthread_cond_destroy, pthread_cond_destroy); __weak_reference(_pthread_cond_wait, pthread_cond_wait); __weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_pthread_cond_signal, pthread_cond_signal); __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast); /* Reinitialize a condition variable to defaults. */ int _cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int _pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (cond == NULL) return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. 
*/ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Wait forever: */ curthread->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. 
*/ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization. */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. */ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ curthread->wakeup_time.tv_sec = abstime->tv_sec; curthread->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. */ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. 
*/ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); /* * Return ETIMEDOUT if the wait * timed out and there wasn't an * error locking the mutex: */ if ((curthread->timeout != 0) && rval == 0) rval = ETIMEDOUT; } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; if ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } int _pthread_cond_broadcast(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. 
*/ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c index c9513cfac15a..fd90e2959077 100644 --- a/lib/libc_r/uthread/uthread_exit.c +++ b/lib/libc_r/uthread/uthread_exit.c @@ -1,241 +1,227 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) __weak_reference(_pthread_exit, pthread_exit); void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ snprintf(s, sizeof(s), "Fatal error '%s' at line %d in file %s (errno = %d)\n", string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. 
*/ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. 
*/ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Check if there is a thread joining this one: */ if (curthread->joiner != NULL) { pthread = curthread->joiner; curthread->joiner = NULL; - switch (pthread->suspended) { - case SUSP_JOIN: - /* - * The joining thread is suspended. Change the - * suspension state to make the thread runnable when it - * is resumed: - */ - pthread->suspended = SUSP_NO; - break; - case SUSP_NO: - /* Make the joining thread runnable: */ - PTHREAD_NEW_STATE(pthread, PS_RUNNING); - break; - default: - PANIC("Unreachable code reached"); - } + /* Make the joining thread runnable: */ + PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Set the return value for the joining thread: */ pthread->join_status.ret = curthread->ret; pthread->join_status.error = 0; pthread->join_status.thread = NULL; /* Make this thread collectable by the garbage collector. */ PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) == 0), "Cannot join a detached thread"); curthread->attr.flags |= PTHREAD_DETACHED; } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. 
*/ PANIC("Dead thread has resumed"); } diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c index 2790748fd53b..74db740a07b8 100644 --- a/lib/libc_r/uthread/uthread_init.c +++ b/lib/libc_r/uthread/uthread_init.c @@ -1,486 +1,500 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "pthread_private.h" /* * All weak references used within libc should be in this table. * This will is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; int _pthread_guard_default; int _pthread_page_size; /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; int sched_stack_size; /* Size of scheduler stack. */ struct clockinfo clockinfo; struct sigaction act; _pthread_page_size = getpagesize(); _pthread_guard_default = getpagesize(); sched_stack_size = getpagesize(); pthread_attr_default.guardsize_attr = _pthread_guard_default; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); - if (__sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) + if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); - if (__sys_dup2(fd,0) == -1 || - __sys_dup2(fd,1) == -1 || - __sys_dup2(fd,2) == -1) + if (__sys_dup2(fd, 0) == -1 || + __sys_dup2(fd, 1) == -1 || + __sys_dup2(fd, 2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them : */ - for (i = 0; i < 3; i++) + for (i = 0; i < 3; i++) { if (((_pthread_stdio_flags[i] = - __sys_fcntl(i,F_GETFL, NULL)) == -1) && + __sys_fcntl(i, F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); + } /* * Create a pipe that is written to by the signal handler to prevent * signals being missed in calls to _select: */ if (__sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } + + /* + * Make sure the pipe does not get in the way of stdio: + */ + for (i = 0; i < 2; i++) { + if (_thread_kern_pipe[i] < 3) { + fd = __sys_fcntl(_thread_kern_pipe[i], F_DUPFD, 3); + if (fd == -1) + PANIC("Cannot create kernel pipe"); + __sys_close(_thread_kern_pipe[i]); + _thread_kern_pipe[i] = fd; + } + } /* Get the flags for the read pipe: */ - else if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { + if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = 
__sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) _usrstack = (void *)USRSTACK; /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the paramters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond. 
*/ if (mmap(_usrstack - PTHREAD_STACK_INITIAL - _pthread_guard_default, _pthread_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + sched_stack_size - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Set the name of the thread: */ _thread_initial->name = strdup("_thread_initial"); /* Initialize joiner to NULL (no joiner): */ _thread_initial->joiner = NULL; /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; /* Initialize the initial context: */ _thread_initial->curframe = NULL; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; 
_thread_initial->specific = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _set_curthread(_thread_initial); /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (__sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || __sys_sigaction(SIGINFO, &act, NULL) != 0 || __sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ __sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ? 
clockinfo.tick : CLOCK_RES_USEC_MIN; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } } } /* Initialise the garbage collector mutex and condition variable. 
*/ if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif diff --git a/lib/libc_r/uthread/uthread_multi_np.c b/lib/libc_r/uthread/uthread_multi_np.c index c1a069f11ce5..bd42365621a6 100644 --- a/lib/libc_r/uthread/uthread_multi_np.c +++ b/lib/libc_r/uthread/uthread_multi_np.c @@ -1,46 +1,50 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_multi_np, pthread_multi_np); int _pthread_multi_np() { + /* Return to multi-threaded scheduling mode: */ - _thread_single = NULL; - return(0); + /* + * XXX - Do we want to do this? + * __is_threaded = 1; + */ + pthread_resume_all_np(); + return (0); } diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c index 0f67b4b01965..86e0b8bf324c 100644 --- a/lib/libc_r/uthread/uthread_mutex.c +++ b/lib/libc_r/uthread/uthread_mutex.c @@ -1,1576 +1,1544 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); 
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; static struct pthread_mutex_attr static_mutex_attr = PTHREAD_MUTEXATTR_STATIC_INITIALIZER; static pthread_mutexattr_t static_mattr = &static_mutex_attr; /* Single underscore versions provided for libc internal usage: */ __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); /* No difference between libc and application usage of these: */ __weak_reference(_pthread_mutex_init, pthread_mutex_init); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int _pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; int flags; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; flags = 0; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= 
MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; flags = (*mutex_attr)->m_flags; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Set the mutex flags: */ pmutex->m_flags = flags; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. */ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int _pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ 
_SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int init_static_private(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, &static_mattr); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int mutex_trylock_common(pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. 
*/ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); /* Return the completion status: */ return (ret); } int __pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } int _pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking the mutex private (delete safe): */ else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } static int mutex_lock_common(pthread_mutex_t * mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* Reset the interrupted flag: */ curthread->interrupted = 0; /* * Enter a loop waiting to become the mutex owner. We need a * loop in case the waiting thread is interrupted by a signal * to execute a signal handler. It is not (currently) possible * to remain in the waiting queue while running a handler. 
* Instead, the thread is interrupted and backed out of the * waiting queue prior to executing the signal handler. */ do { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. 
*/ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; if (curthread->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* Clear any previous 
error: */ curthread->error = 0; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = curthread->error; curthread->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (curthread->interrupted != 0) mutex_queue_remove(*mutex, curthread); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } while (((*mutex)->m_owner != curthread) && (ret == 0) && (curthread->interrupted == 0)); if (curthread->interrupted != 0 && curthread->continuation != NULL) curthread->continuation((void *) curthread); /* Return the completion status: */ return (ret); } int __pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking it private (delete safe): */ else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { 
return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. 
*/ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { struct pthread *curthread = _get_curthread(); int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { - /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. 
- */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + /* Make the new owner runnable: */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. 
*/ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. 
*/ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority * been lowered and/or this threads * priority has been raised subsequent * to this thread being queued on the * waiting list. 
*/ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritence mutex. 
A change in a threads base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorites of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this threads change * in priority. This has the side effect of changing * the threads active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritence mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when a threads priority changes that is already in the mutex * waiting queue. 
*/ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning threads * active priority(*). * * (*) Because the owning threads current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the threads saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the threads priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the threads priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the threads new priority has affected the * priority of the mutex. */ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. 
*/ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning threads active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritence * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritence * mutexes. This might be optimized by only placing * priority inheritence mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owners saved (inherited) priority to * reflect the priority of the previous mutex. 
*/ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the threads inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritence mutexes. */ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. 
*/ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. 
*/ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } diff --git a/lib/libc_r/uthread/uthread_priority_queue.c b/lib/libc_r/uthread/uthread_priority_queue.c index 55d742b9297a..b700d97f7955 100644 --- a/lib/libc_r/uthread/uthread_priority_queue.c +++ b/lib/libc_r/uthread/uthread_priority_queue.c @@ -1,337 +1,370 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void pq_insert_prio_list(pq_queue_t *pq, int prio); #if defined(_PTHREADS_INVARIANTS) static int _pq_active = 0; #define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ) #define _PQ_SET_ACTIVE() _pq_active = 1 #define _PQ_CLEAR_ACTIVE() _pq_active = 0 #define _PQ_ASSERT_ACTIVE(msg) do { \ if (_pq_active == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_INACTIVE(msg) do { \ if (_pq_active != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_PROTECTED(msg) \ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \ ((_get_curthread())->sig_defer_count > 0) ||\ (_sig_in_handler != 0), msg); #else #define _PQ_SET_ACTIVE() #define _PQ_CLEAR_ACTIVE() #define _PQ_ASSERT_ACTIVE(msg) #define _PQ_ASSERT_INACTIVE(msg) #define 
_PQ_ASSERT_IN_WAITQ(thrd, msg) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) #define _PQ_ASSERT_PROTECTED(msg) #endif int _pq_alloc(pq_queue_t *pq, int minprio, int maxprio) { int ret = 0; int prioslots = maxprio - minprio + 1; if (pq == NULL) ret = -1; /* Create the priority queue with (maxprio - minprio + 1) slots: */ else if ((pq->pq_lists = (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) ret = -1; else { /* Remember the queue size: */ pq->pq_size = prioslots; ret = _pq_init(pq); } return (ret); } int _pq_init(pq_queue_t *pq) { int i, ret = 0; if ((pq == NULL) || (pq->pq_lists == NULL)) ret = -1; else { /* Initialize the queue for each priority slot: */ for (i = 0; i < pq->pq_size; i++) { TAILQ_INIT(&pq->pq_lists[i].pl_head); pq->pq_lists[i].pl_prio = i; pq->pq_lists[i].pl_queued = 0; } /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); _PQ_CLEAR_ACTIVE(); } return (ret); } void _pq_remove(pq_queue_t *pq, pthread_t pthread) { int prio = pthread->active_priority; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue"); _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!"); /* * Remove this thread from priority list. Note that if * the priority list becomes empty, it is not removed * from the priority queue because another thread may be * added to the priority list (resulting in a needless * removal/insertion). Priority lists are only removed * from the priority queue when _pq_first is called. */ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); /* This thread is now longer in the priority queue. 
*/ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; _PQ_CLEAR_ACTIVE(); } void _pq_insert_head(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. + * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_head: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); - - TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_head: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } void _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. 
+ * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_tail: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); - - TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_tail: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } pthread_t _pq_first(pq_queue_t *pq) { pq_list_t *pql; pthread_t pthread = NULL; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_first: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!"); while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && (pthread == NULL)) { if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { /* * The priority list is empty; remove the list * from the queue. 
*/ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); /* Mark the list as not being in the queue: */ pql->pl_queued = 0; + } else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* + * This thread is suspended; remove it from the + * list and ensure its state is suspended. + */ + TAILQ_REMOVE(&pql->pl_head, pthread, pqe); + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + + /* This thread is now longer in the priority queue. */ + pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; + pthread = NULL; } } _PQ_CLEAR_ACTIVE(); return (pthread); } static void pq_insert_prio_list(pq_queue_t *pq, int prio) { pq_list_t *pql; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active"); _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!"); /* * The priority queue is in descending priority order. Start at * the beginning of the queue and find the list before which the * new list should be inserted. */ pql = TAILQ_FIRST(&pq->pq_queue); while ((pql != NULL) && (pql->pl_prio > prio)) pql = TAILQ_NEXT(pql, pl_link); /* Insert the list: */ if (pql == NULL) TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); else TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); /* Mark this list as being in the queue: */ pq->pq_lists[prio].pl_queued = 1; } void _waitq_insert(pthread_t pthread) { pthread_t tid; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue"); if (pthread->wakeup_time.tv_sec == -1) TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe); else { tid = TAILQ_FIRST(&_waitingq); while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) || ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) && (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec)))) tid = TAILQ_NEXT(tid, pqe); if (tid == NULL) TAILQ_INSERT_TAIL(&_waitingq, 
pthread, pqe); else TAILQ_INSERT_BEFORE(tid, pthread, pqe); } pthread->flags |= PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_remove(pthread_t pthread) { /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue"); TAILQ_REMOVE(&_waitingq, pthread, pqe); pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_setactive(void) { _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active"); _PQ_SET_ACTIVE(); } void _waitq_clearactive(void) { _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active"); _PQ_CLEAR_ACTIVE(); } diff --git a/lib/libc_r/uthread/uthread_resume_np.c b/lib/libc_r/uthread/uthread_resume_np.c index 9cbcf8563790..ed20b6a8d2f5 100644 --- a/lib/libc_r/uthread/uthread_resume_np.c +++ b/lib/libc_r/uthread/uthread_resume_np.c @@ -1,96 +1,111 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" +static void resume_common(struct pthread *); + __weak_reference(_pthread_resume_np, pthread_resume_np); +__weak_reference(_pthread_resume_all_np, pthread_resume_all_np); /* Resume a thread: */ int _pthread_resume_np(pthread_t thread) { - int ret; - enum pthread_susp old_suspended; + int ret; /* Find the thread in the list of active threads: */ if ((ret = _find_thread(thread)) == 0) { - /* Cancel any pending suspensions: */ - old_suspended = thread->suspended; - thread->suspended = SUSP_NO; + /* + * Defer signals to protect the scheduling queues + * from access by the signal handler: + */ + _thread_kern_sig_defer(); + + if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + resume_common(thread); + + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); + } + return (ret); +} + +void +_pthread_resume_all_np(void) +{ + struct pthread *curthread = _get_curthread(); + struct pthread *thread; + + /* + * Defer signals to protect the scheduling queues from access + * by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if ((thread != curthread) && + 
((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)) + resume_common(thread); + } - /* Is it currently suspended? */ - if (thread->state == PS_SUSPENDED) { - /* - * Defer signals to protect the scheduling queues - * from access by the signal handler: - */ - _thread_kern_sig_defer(); + /* + * Undefer and handle pending signals, yielding if necessary: + */ + _thread_kern_sig_undefer(); +} - switch (old_suspended) { - case SUSP_MUTEX_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_MUTEX_WAIT); - break; - case SUSP_COND_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_COND_WAIT); - break; - case SUSP_JOIN: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_JOIN); - break; - case SUSP_NOWAIT: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - case SUSP_NO: - case SUSP_YES: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - } +static void +resume_common(struct pthread *thread) +{ + /* Clear the suspend flag: */ + thread->flags &= ~PTHREAD_FLAGS_SUSPENDED; - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - _thread_kern_sig_undefer(); - } + /* + * If the thread's state is suspended, that means it is + * now runnable but not in any scheduling queue. Set the + * state to running and insert it into the run queue. + */ + if (thread->state == PS_SUSPENDED) { + PTHREAD_SET_STATE(thread, PS_RUNNING); + if (thread->priority_mutex_count > 0) + PTHREAD_PRIOQ_INSERT_HEAD(thread); + else + PTHREAD_PRIOQ_INSERT_TAIL(thread); } - return(ret); } diff --git a/lib/libc_r/uthread/uthread_sig.c b/lib/libc_r/uthread/uthread_sig.c index 1bd93b7d67cb..7aa9b53967b3 100644 --- a/lib/libc_r/uthread/uthread_sig.c +++ b/lib/libc_r/uthread/uthread_sig.c @@ -1,1117 +1,1125 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args); static void thread_sig_check_state(struct pthread *pthread, int sig); static struct pthread *thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sigframe_add(struct pthread *thread, int sig, int has_args); static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf); static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp); -/* #define DEBUG_SIGNAL */ +/*#define DEBUG_SIGNAL*/ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); struct pthread *pthread, *pthread_h; int in_sched = _thread_kern_in_sched; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. 
*/ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* Schedule the next thread: */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. */ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. 
* * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) == NULL) DBG_MSG("No thread to handle signal %d\n", sig); else if (pthread == curthread) { /* * Unblock the signal and restore the process signal * mask in case we don't return from the handler: */ _thread_sigq[sig - 1].blocked = 0; __sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL); /* Call the signal handler for the current thread: */ thread_sig_invoke_handler(sig, info, ucp); /* * Set the process signal mask in the context; it * could have changed by the handler. */ ucp->uc_sigmask = _process_sigmask; /* Resume the interrupted thread: */ __sys_sigreturn(ucp); } else { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. 
*/ if ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority)) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); void (*sigfunc)(int, siginfo_t *, void *); int saved_seqno; sigset_t saved_sigmask; /* Invoke the signal handler without going through the scheduler: */ DBG_MSG("Got signal %d, calling handler for current thread %p\n", sig, curthread); /* Save the threads signal mask: */ saved_sigmask = curthread->sigmask; saved_seqno = curthread->sigmask_seqno; /* Setup the threads signal mask: */ SIGSETOR(curthread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&curthread->sigmask, sig); /* * Check that a custom handler is installed and if * the signal is not blocked: */ sigfunc = _thread_sigact[sig - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) || (info == NULL)) (*(sigfunc))(sig, info, ucp); else (*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp); } /* * Only restore the signal mask if it hasn't been changed by the * application during invocation of the signal handler: */ if (curthread->sigmask_seqno == saved_seqno) curthread->sigmask = saved_sigmask; } /* * Find a thread that can handle the signal. 
*/ struct pthread * thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; struct pthread *pthread, *pthread_next; struct pthread *suspended_thread, *signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. 
*/ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && - !sigismember(&pthread->sigmask, sig)) { + !sigismember(&pthread->sigmask, sig) && + ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) == 0)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. 
*/ return (NULL); } void _thread_sig_check_pending(struct pthread *pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { struct pthread *pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { struct pthread *pthread, *pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. 
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args) { int restart; int suppress_handler = 0; int thread_is_active = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* Make sure this signal isn't still in the pending set: */ sigdelset(&pthread->sigpend, sig); /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. 
*/ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); else /* * This thread is running; avoid placing it in * the run queue: */ thread_is_active = 1; break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; case PS_JOIN: /* * Remove the thread from the wait queue. 
It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. 
Raise the priority and place - * the thread in the run queue. + * the thread in the run queue. It is also possible + * for a signal to be sent to a suspended thread, + * mostly via pthread_kill(). If a thread is suspended, + * don't insert it into the priority queue; just set + * its state to suspended and it will run the signal + * handler when it is resumed. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; - if (thread_is_active == 0) + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + else if (thread_is_active == 0) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(struct pthread *pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(struct pthread *pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (sigismember(&pthread->sigmask, sig)) /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); else if (pthread == curthread) /* Call the signal 
handler for the current thread: */ thread_sig_invoke_handler(sig, NULL, NULL); else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* * We're coming from the kernel scheduler; clear the in * scheduler flag: */ _thread_kern_in_sched = 0; /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Dispatch the signal via the custom signal handler: */ if (psf->sig_has_args == 0) thread_sig_invoke_handler(psf->signo, NULL, NULL); else thread_sig_invoke_handler(psf->signo, &psf->siginfo, &psf->uc); /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(struct pthread *thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp; /* Get the top of the threads stack: */ stackp = GET_STACK_JB(thread->ctx.jb); /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup 
the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&thread->ctx, &psf->ctx, sizeof(thread->ctx)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&psf->ctx, &thread->ctx, sizeof(thread->ctx)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } diff --git a/lib/libc_r/uthread/uthread_single_np.c 
b/lib/libc_r/uthread/uthread_single_np.c index 85471b8cf5c1..1ee5e7918bd9 100644 --- a/lib/libc_r/uthread/uthread_single_np.c +++ b/lib/libc_r/uthread/uthread_single_np.c @@ -1,47 +1,49 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_single_np, pthread_single_np); int _pthread_single_np() { - struct pthread *curthread = _get_curthread(); /* Enter single-threaded (non-POSIX) scheduling mode: */ - _thread_single = curthread; - return(0); + pthread_suspend_all_np(); + /* + * XXX - Do we want to do this? + * __is_threaded = 0; + */ + return (0); } diff --git a/lib/libc_r/uthread/uthread_spinlock.c b/lib/libc_r/uthread/uthread_spinlock.c index 73337094d431..e05aa4a5fc0b 100644 --- a/lib/libc_r/uthread/uthread_spinlock.c +++ b/lib/libc_r/uthread/uthread_spinlock.c @@ -1,111 +1,111 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include "pthread_private.h" /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. */ void _spinlock(spinlock_t *lck) { struct pthread *curthread = _get_curthread(); /* * Try to grab the lock and loop if another thread grabs * it before we do. */ while(_atomic_lock(&lck->access_lock)) { /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; } /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. * * This function checks if the running thread has already locked the * location, warns if this occurs and creates a thread dump before * returning. */ void _spinlock_debug(spinlock_t *lck, char *fname, int lineno) { struct pthread *curthread = _get_curthread(); int cnt = 0; /* * Try to grab the lock and loop if another thread grabs * it before we do. 
*/ while(_atomic_lock(&lck->access_lock)) { cnt++; if (cnt > 100) { char str[256]; - snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", _getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); + snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); __sys_write(2,str,strlen(str)); __sleep(1); cnt = 0; } /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; lck->fname = fname; lck->lineno = lineno; } diff --git a/lib/libc_r/uthread/uthread_suspend_np.c b/lib/libc_r/uthread/uthread_suspend_np.c index 0e272ff11d55..952baa350ec5 100644 --- a/lib/libc_r/uthread/uthread_suspend_np.c +++ b/lib/libc_r/uthread/uthread_suspend_np.c @@ -1,161 +1,104 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" -static void finish_suspension(void *arg); +static void suspend_common(struct pthread *thread); __weak_reference(_pthread_suspend_np, pthread_suspend_np); +__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np); /* Suspend a thread: */ int _pthread_suspend_np(pthread_t thread) { int ret; + /* Suspending the current thread doesn't make sense. 
*/ + if (thread == _get_curthread()) + ret = EDEADLK; + /* Find the thread in the list of active threads: */ - if ((ret = _find_thread(thread)) == 0) { + else if ((ret = _find_thread(thread)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - switch (thread->state) { - case PS_RUNNING: - /* - * Remove the thread from the priority queue and - * set the state to suspended: - */ - PTHREAD_PRIOQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_SPINBLOCK: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - /* - * Remove these threads from the work queue - * and mark the operation as interrupted: - */ - if ((thread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - _thread_seterrno(thread,EINTR); - - /* FALLTHROUGH */ - case PS_SLEEP_WAIT: - thread->interrupted = 1; - - /* FALLTHROUGH */ - case PS_SIGTHREAD: - case PS_WAIT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - /* - * Remove these threads from the waiting queue and - * set their state to suspended: - */ - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_MUTEX_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_MUTEX_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_COND_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_COND_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_JOIN: - /* Mark the thread as suspended and joining: */ - thread->suspended = SUSP_JOIN; - - PTHREAD_NEW_STATE(thread, PS_SUSPENDED); - break; - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FILE_WAIT: - /* Mark the thread as suspended: */ - thread->suspended = SUSP_YES; - - /* - * Threads in these states may be in queues. 
- * In order to preserve queue integrity, the - * cancelled thread must remove itself from the - * queue. Mark the thread as interrupted and - * set the state to running. When the thread - * resumes, it will remove itself from the queue - * and call the suspension completion routine. - */ - thread->interrupted = 1; - _thread_seterrno(thread, EINTR); - PTHREAD_NEW_STATE(thread, PS_RUNNING); - thread->continuation = finish_suspension; - break; - - case PS_DEAD: - case PS_DEADLOCK: - case PS_STATE_MAX: - case PS_SUSPENDED: - /* Nothing needs to be done: */ - break; - } + suspend_common(thread); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } - return(ret); + return (ret); } -static void -finish_suspension(void *arg) +void +_pthread_suspend_all_np(void) { struct pthread *curthread = _get_curthread(); + struct pthread *thread; - if (curthread->suspended != SUSP_NO) - _thread_kern_sched_state(PS_SUSPENDED, __FILE__, __LINE__); -} + /* + * Defer signals to protect the scheduling queues from + * access by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if (thread != curthread) + suspend_common(thread); + } + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); +} +void +suspend_common(struct pthread *thread) +{ + thread->flags |= PTHREAD_FLAGS_SUSPENDED; + if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) { + PTHREAD_PRIOQ_REMOVE(thread); + PTHREAD_SET_STATE(thread, PS_SUSPENDED); + } +} diff --git a/lib/libkse/thread/thr_cancel.c b/lib/libkse/thread/thr_cancel.c index b6b070f0549d..d9324abf01aa 100644 --- a/lib/libkse/thread/thr_cancel.c +++ b/lib/libkse/thread/thr_cancel.c @@ -1,245 +1,231 @@ /* * David Leonard , 1999. Public domain. 
* $FreeBSD$ */ #include #include #include "pthread_private.h" static void finish_cancellation(void *arg); __weak_reference(_pthread_cancel, pthread_cancel); __weak_reference(_pthread_setcancelstate, pthread_setcancelstate); __weak_reference(_pthread_setcanceltype, pthread_setcanceltype); __weak_reference(_pthread_testcancel, pthread_testcancel); int _pthread_cancel(pthread_t pthread) { int ret; if ((ret = _find_thread(pthread)) != 0) { /* NOTHING */ } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK || (pthread->flags & PTHREAD_EXITING) != 0) { ret = 0; } else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) || (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) && ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0))) /* Just mark it for cancellation: */ pthread->cancelflags |= PTHREAD_CANCELLING; else { /* * Check if we need to kick it back into the * run queue: */ switch (pthread->state) { case PS_RUNNING: /* No need to resume: */ pthread->cancelflags |= PTHREAD_CANCELLING; break; case PS_SPINBLOCK: case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Remove these threads from the work queue: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); /* Fall through: */ case PS_SIGTHREAD: case PS_SLEEP_WAIT: case PS_WAIT_WAIT: case PS_SIGSUSPEND: case PS_SIGWAIT: /* Interrupt and resume: */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread,PS_RUNNING); break; case PS_JOIN: /* * Disconnect the thread from the joinee: */ if (pthread->join_status.thread != NULL) { pthread->join_status.thread->joiner = NULL; pthread->join_status.thread = NULL; } pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread, PS_RUNNING); break; case PS_SUSPENDED: - if (pthread->suspended == SUSP_NO || - pthread->suspended == SUSP_YES || - pthread->suspended == SUSP_JOIN || - 
pthread->suspended == SUSP_NOWAIT) { - /* - * This thread isn't in any scheduling - * queues; just change it's state: - */ - pthread->cancelflags |= - PTHREAD_CANCELLING; - PTHREAD_SET_STATE(pthread, PS_RUNNING); - break; - } - /* FALLTHROUGH */ case PS_MUTEX_WAIT: case PS_COND_WAIT: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: /* * Threads in these states may be in queues. * In order to preserve queue integrity, the * cancelled thread must remove itself from the * queue. Mark the thread as interrupted and * needing cancellation, and set the state to * running. When the thread resumes, it will * remove itself from the queue and call the * cancellation completion routine. */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCEL_NEEDED; - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_NEW_STATE(pthread, PS_RUNNING); pthread->continuation = finish_cancellation; break; case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: /* Ignore - only here to silence -Wall: */ break; } } /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); ret = 0; } return (ret); } int _pthread_setcancelstate(int state, int *oldstate) { struct pthread *curthread = _get_curthread(); int ostate; int ret; ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE; switch (state) { case PTHREAD_CANCEL_ENABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE; if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0) pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DISABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags |= PTHREAD_CANCEL_DISABLE; ret = 0; break; default: ret = EINVAL; } return (ret); } int _pthread_setcanceltype(int type, int *oldtype) { struct pthread *curthread = _get_curthread(); int otype; int ret; otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS; switch (type) { case PTHREAD_CANCEL_ASYNCHRONOUS: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags |= 
PTHREAD_CANCEL_ASYNCHRONOUS; pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DEFERRED: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS; ret = 0; break; default: ret = EINVAL; } return (ret); } void _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) && ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) && ((curthread->flags & PTHREAD_EXITING) == 0)) { /* * It is possible for this thread to be swapped out * while performing cancellation; do not allow it * to be cancelled again. */ curthread->cancelflags &= ~PTHREAD_CANCELLING; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); PANIC("cancel"); } } void _thread_enter_cancellation_point(void) { struct pthread *curthread = _get_curthread(); /* Look for a cancellation before we block: */ pthread_testcancel(); curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT; } void _thread_leave_cancellation_point(void) { struct pthread *curthread = _get_curthread(); curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT; /* Look for a cancellation after we unblock: */ pthread_testcancel(); } static void finish_cancellation(void *arg) { struct pthread *curthread = _get_curthread(); curthread->continuation = NULL; curthread->interrupted = 0; if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); } } diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c index 7f3fe7acb2dd..cb45725531d0 100644 --- a/lib/libkse/thread/thr_cond.c +++ b/lib/libkse/thread/thr_cond.c @@ -1,747 +1,735 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); __weak_reference(_pthread_cond_init, pthread_cond_init); __weak_reference(_pthread_cond_destroy, pthread_cond_destroy); __weak_reference(_pthread_cond_wait, pthread_cond_wait); __weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_pthread_cond_signal, pthread_cond_signal); __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast); /* Reinitialize a condition variable to defaults. */ int _cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int _pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (cond == NULL) return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. 
*/ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Wait forever: */ curthread->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. 
*/ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization. */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. */ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ curthread->wakeup_time.tv_sec = abstime->tv_sec; curthread->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. */ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. 
*/ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); /* * Return ETIMEDOUT if the wait * timed out and there wasn't an * error locking the mutex: */ if ((curthread->timeout != 0) && rval == 0) rval = ETIMEDOUT; } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; if ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } int _pthread_cond_broadcast(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. 
*/ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c index c9513cfac15a..fd90e2959077 100644 --- a/lib/libkse/thread/thr_exit.c +++ b/lib/libkse/thread/thr_exit.c @@ -1,241 +1,227 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) __weak_reference(_pthread_exit, pthread_exit); void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ snprintf(s, sizeof(s), "Fatal error '%s' at line %d in file %s (errno = %d)\n", string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. 
*/ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the application's responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. 
*/ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Check if there is a thread joining this one: */ if (curthread->joiner != NULL) { pthread = curthread->joiner; curthread->joiner = NULL; - switch (pthread->suspended) { - case SUSP_JOIN: - /* - * The joining thread is suspended. Change the - * suspension state to make the thread runnable when it - * is resumed: - */ - pthread->suspended = SUSP_NO; - break; - case SUSP_NO: - /* Make the joining thread runnable: */ - PTHREAD_NEW_STATE(pthread, PS_RUNNING); - break; - default: - PANIC("Unreachable code reached"); - } + /* Make the joining thread runnable: */ + PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Set the return value for the joining thread: */ pthread->join_status.ret = curthread->ret; pthread->join_status.error = 0; pthread->join_status.thread = NULL; /* Make this thread collectable by the garbage collector. */ PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) == 0), "Cannot join a detached thread"); curthread->attr.flags |= PTHREAD_DETACHED; } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. 
*/ PANIC("Dead thread has resumed"); } diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c index 2790748fd53b..74db740a07b8 100644 --- a/lib/libkse/thread/thr_init.c +++ b/lib/libkse/thread/thr_init.c @@ -1,486 +1,500 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "pthread_private.h" /* * All weak references used within libc should be in this table. * This will is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; int _pthread_guard_default; int _pthread_page_size; /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; int sched_stack_size; /* Size of scheduler stack. */ struct clockinfo clockinfo; struct sigaction act; _pthread_page_size = getpagesize(); _pthread_guard_default = getpagesize(); sched_stack_size = getpagesize(); pthread_attr_default.guardsize_attr = _pthread_guard_default; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); - if (__sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) + if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); - if (__sys_dup2(fd,0) == -1 || - __sys_dup2(fd,1) == -1 || - __sys_dup2(fd,2) == -1) + if (__sys_dup2(fd, 0) == -1 || + __sys_dup2(fd, 1) == -1 || + __sys_dup2(fd, 2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them : */ - for (i = 0; i < 3; i++) + for (i = 0; i < 3; i++) { if (((_pthread_stdio_flags[i] = - __sys_fcntl(i,F_GETFL, NULL)) == -1) && + __sys_fcntl(i, F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); + } /* * Create a pipe that is written to by the signal handler to prevent * signals being missed in calls to _select: */ if (__sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } + + /* + * Make sure the pipe does not get in the way of stdio: + */ + for (i = 0; i < 2; i++) { + if (_thread_kern_pipe[i] < 3) { + fd = __sys_fcntl(_thread_kern_pipe[i], F_DUPFD, 3); + if (fd == -1) + PANIC("Cannot create kernel pipe"); + __sys_close(_thread_kern_pipe[i]); + _thread_kern_pipe[i] = fd; + } + } /* Get the flags for the read pipe: */ - else if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { + if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = 
__sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) _usrstack = (void *)USRSTACK; /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the parameters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond. 
*/ if (mmap(_usrstack - PTHREAD_STACK_INITIAL - _pthread_guard_default, _pthread_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + sched_stack_size - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Set the name of the thread: */ _thread_initial->name = strdup("_thread_initial"); /* Initialize joiner to NULL (no joiner): */ _thread_initial->joiner = NULL; /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; /* Initialize the initial context: */ _thread_initial->curframe = NULL; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; 
_thread_initial->specific = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _set_curthread(_thread_initial); /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (__sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || __sys_sigaction(SIGINFO, &act, NULL) != 0 || __sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ __sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ? 
clockinfo.tick : CLOCK_RES_USEC_MIN; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } } } /* Initialise the garbage collector mutex and condition variable. 
*/ if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif diff --git a/lib/libkse/thread/thr_multi_np.c b/lib/libkse/thread/thr_multi_np.c index c1a069f11ce5..bd42365621a6 100644 --- a/lib/libkse/thread/thr_multi_np.c +++ b/lib/libkse/thread/thr_multi_np.c @@ -1,46 +1,50 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_multi_np, pthread_multi_np); int _pthread_multi_np() { + /* Return to multi-threaded scheduling mode: */ - _thread_single = NULL; - return(0); + /* + * XXX - Do we want to do this? + * __is_threaded = 1; + */ + pthread_resume_all_np(); + return (0); } diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c index 0f67b4b01965..86e0b8bf324c 100644 --- a/lib/libkse/thread/thr_mutex.c +++ b/lib/libkse/thread/thr_mutex.c @@ -1,1576 +1,1544 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); 
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; static struct pthread_mutex_attr static_mutex_attr = PTHREAD_MUTEXATTR_STATIC_INITIALIZER; static pthread_mutexattr_t static_mattr = &static_mutex_attr; /* Single underscore versions provided for libc internal usage: */ __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); /* No difference between libc and application usage of these: */ __weak_reference(_pthread_mutex_init, pthread_mutex_init); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int _pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; int flags; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; flags = 0; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= 
MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; flags = (*mutex_attr)->m_flags; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Set the mutex flags: */ pmutex->m_flags = flags; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. */ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int _pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ 
_SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int init_static_private(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, &static_mattr); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int mutex_trylock_common(pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. 
*/ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); /* Return the completion status: */ return (ret); } int __pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } int _pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking the mutex private (delete safe): */ else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } static int mutex_lock_common(pthread_mutex_t * mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* Reset the interrupted flag: */ curthread->interrupted = 0; /* * Enter a loop waiting to become the mutex owner. We need a * loop in case the waiting thread is interrupted by a signal * to execute a signal handler. It is not (currently) possible * to remain in the waiting queue while running a handler. 
* Instead, the thread is interrupted and backed out of the * waiting queue prior to executing the signal handler. */ do { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. 
*/ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; if (curthread->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* Clear any previous 
error: */ curthread->error = 0; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = curthread->error; curthread->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (curthread->interrupted != 0) mutex_queue_remove(*mutex, curthread); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } while (((*mutex)->m_owner != curthread) && (ret == 0) && (curthread->interrupted == 0)); if (curthread->interrupted != 0 && curthread->continuation != NULL) curthread->continuation((void *) curthread); /* Return the completion status: */ return (ret); } int __pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking it private (delete safe): */ else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { 
return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. 
*/ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { struct pthread *curthread = _get_curthread(); int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { - /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. 
- */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + /* Make the new owner runnable: */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. 
*/ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. 
*/ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority * been lowered and/or this threads * priority has been raised subsequent * to this thread being queued on the * waiting list. 
*/ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritence mutex. 
A change in a threads base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorites of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this threads change * in priority. This has the side effect of changing * the threads active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritence mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when a threads priority changes that is already in the mutex * waiting queue. 
*/ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning threads * active priority(*). * * (*) Because the owning threads current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the threads saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the threads priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the threads priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the threads new priority has affected the * priority of the mutex. */ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. 
*/ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning threads active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritence * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritence * mutexes. This might be optimized by only placing * priority inheritence mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owners saved (inherited) priority to * reflect the priority of the previous mutex. 
*/ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the threads inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritence mutexes. */ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. 
*/ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. 
*/ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c index 55d742b9297a..b700d97f7955 100644 --- a/lib/libkse/thread/thr_priority_queue.c +++ b/lib/libkse/thread/thr_priority_queue.c @@ -1,337 +1,370 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void pq_insert_prio_list(pq_queue_t *pq, int prio); #if defined(_PTHREADS_INVARIANTS) static int _pq_active = 0; #define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ) #define _PQ_SET_ACTIVE() _pq_active = 1 #define _PQ_CLEAR_ACTIVE() _pq_active = 0 #define _PQ_ASSERT_ACTIVE(msg) do { \ if (_pq_active == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_INACTIVE(msg) do { \ if (_pq_active != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_PROTECTED(msg) \ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \ ((_get_curthread())->sig_defer_count > 0) ||\ (_sig_in_handler != 0), msg); #else #define _PQ_SET_ACTIVE() #define _PQ_CLEAR_ACTIVE() #define _PQ_ASSERT_ACTIVE(msg) #define _PQ_ASSERT_INACTIVE(msg) #define 
_PQ_ASSERT_IN_WAITQ(thrd, msg) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) #define _PQ_ASSERT_PROTECTED(msg) #endif int _pq_alloc(pq_queue_t *pq, int minprio, int maxprio) { int ret = 0; int prioslots = maxprio - minprio + 1; if (pq == NULL) ret = -1; /* Create the priority queue with (maxprio - minprio + 1) slots: */ else if ((pq->pq_lists = (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) ret = -1; else { /* Remember the queue size: */ pq->pq_size = prioslots; ret = _pq_init(pq); } return (ret); } int _pq_init(pq_queue_t *pq) { int i, ret = 0; if ((pq == NULL) || (pq->pq_lists == NULL)) ret = -1; else { /* Initialize the queue for each priority slot: */ for (i = 0; i < pq->pq_size; i++) { TAILQ_INIT(&pq->pq_lists[i].pl_head); pq->pq_lists[i].pl_prio = i; pq->pq_lists[i].pl_queued = 0; } /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); _PQ_CLEAR_ACTIVE(); } return (ret); } void _pq_remove(pq_queue_t *pq, pthread_t pthread) { int prio = pthread->active_priority; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue"); _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!"); /* * Remove this thread from priority list. Note that if * the priority list becomes empty, it is not removed * from the priority queue because another thread may be * added to the priority list (resulting in a needless * removal/insertion). Priority lists are only removed * from the priority queue when _pq_first is called. */ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); /* This thread is now longer in the priority queue. 
*/ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; _PQ_CLEAR_ACTIVE(); } void _pq_insert_head(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. + * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_head: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); - - TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_head: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } void _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. 
+ * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_tail: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); - - TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_tail: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } pthread_t _pq_first(pq_queue_t *pq) { pq_list_t *pql; pthread_t pthread = NULL; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_first: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!"); while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && (pthread == NULL)) { if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { /* * The priority list is empty; remove the list * from the queue. 
*/ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); /* Mark the list as not being in the queue: */ pql->pl_queued = 0; + } else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* + * This thread is suspended; remove it from the + * list and ensure its state is suspended. + */ + TAILQ_REMOVE(&pql->pl_head, pthread, pqe); + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + + /* This thread is now longer in the priority queue. */ + pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; + pthread = NULL; } } _PQ_CLEAR_ACTIVE(); return (pthread); } static void pq_insert_prio_list(pq_queue_t *pq, int prio) { pq_list_t *pql; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active"); _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!"); /* * The priority queue is in descending priority order. Start at * the beginning of the queue and find the list before which the * new list should be inserted. */ pql = TAILQ_FIRST(&pq->pq_queue); while ((pql != NULL) && (pql->pl_prio > prio)) pql = TAILQ_NEXT(pql, pl_link); /* Insert the list: */ if (pql == NULL) TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); else TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); /* Mark this list as being in the queue: */ pq->pq_lists[prio].pl_queued = 1; } void _waitq_insert(pthread_t pthread) { pthread_t tid; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue"); if (pthread->wakeup_time.tv_sec == -1) TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe); else { tid = TAILQ_FIRST(&_waitingq); while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) || ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) && (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec)))) tid = TAILQ_NEXT(tid, pqe); if (tid == NULL) TAILQ_INSERT_TAIL(&_waitingq, 
pthread, pqe); else TAILQ_INSERT_BEFORE(tid, pthread, pqe); } pthread->flags |= PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_remove(pthread_t pthread) { /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue"); TAILQ_REMOVE(&_waitingq, pthread, pqe); pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_setactive(void) { _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active"); _PQ_SET_ACTIVE(); } void _waitq_clearactive(void) { _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active"); _PQ_CLEAR_ACTIVE(); } diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index 04023fb81a35..3fef49c7b25f 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -1,1362 +1,1339 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. 
*/ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (long)(ra); \ (jb)[0]._jb[R_RA + 4] = (long)(ra); \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) 
do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ - PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 -/* - * Additional state for a thread suspended with pthread_suspend_np(). - */ -enum pthread_susp { - SUSP_NO, /* Not suspended. */ - SUSP_YES, /* Suspended. */ - SUSP_JOIN, /* Suspended, joining. 
*/ - SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ - SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ - SUSP_COND_WAIT /* Suspended, still in a condition queue. */ -}; - /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of default red zone at the end of each stack. In actuality, this "red * zone" is merely an unmapped region, except in the case of the initial stack. * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK * region, an unmapped gap between thread stacks achieves the same effect as * explicitly mapped red zones. * This is declared and initialized in uthread_init.c. */ extern int _pthread_guard_default; extern int _pthread_page_size; /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 #define CLOCK_RES_USEC_MIN 1000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. 
*/ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); struct pthread_signal_frame; struct pthread_state_data { struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; enum pthread_state psd_state; int psd_flags; int psd_interrupted; int psd_longjmp_val; int psd_sigmask_seqno; int psd_signo; int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; struct join_status { struct pthread *thread; void *ret; int error; }; /* * The frame that is added to the top of a threads stack when setting up * up the thread to run a signal handler. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; /* * Threads return context; we use only jmp_buf's for now. 
*/ union { jmp_buf jb; ucontext_t uc; } ctx; int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ ucontext_t uc; siginfo_t siginfo; }; struct pthread_specific_elem { const void *data; int seqno; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* * Threads return context; we use only jmp_buf's for now. */ union { jmp_buf jb; ucontext_t uc; } ctx; /* * Used for tracking delivery of signal handlers. */ struct pthread_signal_frame *curframe; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; - enum pthread_susp suspended; - thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. 
*/ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * A thread can also be joining a thread (the joiner field above). * * It must not be possible for a thread to belong to any of the * above queues while it is handling a signal. Signal handlers * may longjmp back to previous stack frames circumventing normal * control flow. This could corrupt queue integrity if the thread * retains membership in the queue. Therefore, if a thread is a * member of one of these queues when a signal handler is invoked, * it must remove itself from the queue before calling the signal * handler and reinsert itself after normal return of the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex and condition variable) queue * links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. 
*/ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ -#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */ +#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */ +#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. 
*/ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Global variables for the uthread kernel. */ SCLASS void *_usrstack #ifdef GLOBAL_PTHREAD_PRIVATE = (void *) USRSTACK; #else ; #endif /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif -/* - * Ptr to the thread running in single-threaded mode or NULL if - * running multi-threaded (default POSIX behaviour). - */ -SCLASS struct pthread * volatile _thread_single -#ifdef GLOBAL_PTHREAD_PRIVATE -= NULL; -#else -; -#endif - /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. 
*/ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT, -1 }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. 
*/ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. */ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; SCLASS sigset_t _process_sigmask #ifdef GLOBAL_PTHREAD_PRIVATE = { {0, 0, 0, 0} } #endif ; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. 
*/ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_thread(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void *_thread_stack_alloc(size_t, size_t); void _thread_stack_free(void *, size_t, size_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if 
defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(struct pthread *pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(struct pthread *pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_ACL_H int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *); int __sys___acl_delete_fd(int, acl_type_t); int __sys___acl_get_fd(int, acl_type_t, struct acl *); int __sys___acl_set_fd(int, acl_type_t, struct acl *); #endif /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_CAPABILITY_H int __sys___cap_get_fd(int, struct cap *); int 
__sys___cap_set_fd(int, struct cap *); #endif /* #include */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, socklen_t *); int __sys_bind(int, const struct sockaddr *, socklen_t); int __sys_connect(int, const struct sockaddr *, socklen_t); int __sys_getpeername(int, struct sockaddr *, socklen_t *); int __sys_getsockname(int, struct sockaddr *, socklen_t *); int __sys_getsockopt(int, int, int, void *, socklen_t *); int __sys_listen(int, int); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *); ssize_t __sys_recvmsg(int, struct msghdr *, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t); int __sys_setsockopt(int, int, int, const void *, socklen_t); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchflags(int, u_long); int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* 
#include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigreturn(ucontext_t *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); void __sys_exit(int); int __sys_fchown(int, uid_t, gid_t); pid_t __sys_fork(void); long __sys_fpathconf(int, int); int __sys_fsync(int); int __sys_pipe(int *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ diff --git a/lib/libkse/thread/thr_resume_np.c b/lib/libkse/thread/thr_resume_np.c index 9cbcf8563790..ed20b6a8d2f5 100644 --- a/lib/libkse/thread/thr_resume_np.c +++ b/lib/libkse/thread/thr_resume_np.c @@ -1,96 +1,111 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" +static void resume_common(struct pthread *); + __weak_reference(_pthread_resume_np, pthread_resume_np); +__weak_reference(_pthread_resume_all_np, pthread_resume_all_np); /* Resume a thread: */ int _pthread_resume_np(pthread_t thread) { - int ret; - enum pthread_susp old_suspended; + int ret; /* Find the thread in the list of active threads: */ if ((ret = _find_thread(thread)) == 0) { - /* Cancel any pending suspensions: */ - old_suspended = thread->suspended; - thread->suspended = SUSP_NO; + /* + * Defer signals to protect the scheduling queues + * from access by the signal handler: + */ + _thread_kern_sig_defer(); + + if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + resume_common(thread); + + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); + } + return (ret); +} + +void +_pthread_resume_all_np(void) +{ + struct pthread *curthread = _get_curthread(); + struct pthread *thread; + + /* + * Defer signals to protect 
the scheduling queues from access + * by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if ((thread != curthread) && + ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)) + resume_common(thread); + } - /* Is it currently suspended? */ - if (thread->state == PS_SUSPENDED) { - /* - * Defer signals to protect the scheduling queues - * from access by the signal handler: - */ - _thread_kern_sig_defer(); + /* + * Undefer and handle pending signals, yielding if necessary: + */ + _thread_kern_sig_undefer(); +} - switch (old_suspended) { - case SUSP_MUTEX_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_MUTEX_WAIT); - break; - case SUSP_COND_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_COND_WAIT); - break; - case SUSP_JOIN: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_JOIN); - break; - case SUSP_NOWAIT: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - case SUSP_NO: - case SUSP_YES: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - } +static void +resume_common(struct pthread *thread) +{ + /* Clear the suspend flag: */ + thread->flags &= ~PTHREAD_FLAGS_SUSPENDED; - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - _thread_kern_sig_undefer(); - } + /* + * If the thread's state is suspended, that means it is + * now runnable but not in any scheduling queue. Set the + * state to running and insert it into the run queue. 
+ */ + if (thread->state == PS_SUSPENDED) { + PTHREAD_SET_STATE(thread, PS_RUNNING); + if (thread->priority_mutex_count > 0) + PTHREAD_PRIOQ_INSERT_HEAD(thread); + else + PTHREAD_PRIOQ_INSERT_TAIL(thread); } - return(ret); } diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c index 1bd93b7d67cb..7aa9b53967b3 100644 --- a/lib/libkse/thread/thr_sig.c +++ b/lib/libkse/thread/thr_sig.c @@ -1,1117 +1,1125 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args); static void thread_sig_check_state(struct pthread *pthread, int sig); static struct pthread *thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sigframe_add(struct pthread *thread, int sig, int has_args); static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf); static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp); -/* #define DEBUG_SIGNAL */ +/*#define DEBUG_SIGNAL*/ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); struct pthread *pthread, *pthread_h; int in_sched = _thread_kern_in_sched; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. */ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* Schedule the next thread: */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. 
*/ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? 
*/ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) == NULL) DBG_MSG("No thread to handle signal %d\n", sig); else if (pthread == curthread) { /* * Unblock the signal and restore the process signal * mask in case we don't return from the handler: */ _thread_sigq[sig - 1].blocked = 0; __sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL); /* Call the signal handler for the current thread: */ thread_sig_invoke_handler(sig, info, ucp); /* * Set the process signal mask in the context; it * could have changed by the handler. */ ucp->uc_sigmask = _process_sigmask; /* Resume the interrupted thread: */ __sys_sigreturn(ucp); } else { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. 
*/ if ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority)) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); void (*sigfunc)(int, siginfo_t *, void *); int saved_seqno; sigset_t saved_sigmask; /* Invoke the signal handler without going through the scheduler: */ DBG_MSG("Got signal %d, calling handler for current thread %p\n", sig, curthread); /* Save the threads signal mask: */ saved_sigmask = curthread->sigmask; saved_seqno = curthread->sigmask_seqno; /* Setup the threads signal mask: */ SIGSETOR(curthread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&curthread->sigmask, sig); /* * Check that a custom handler is installed and if * the signal is not blocked: */ sigfunc = _thread_sigact[sig - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) || (info == NULL)) (*(sigfunc))(sig, info, ucp); else (*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp); } /* * Only restore the signal mask if it hasn't been changed by the * application during invocation of the signal handler: */ if (curthread->sigmask_seqno == saved_seqno) curthread->sigmask = saved_sigmask; } /* * Find a thread that can handle the signal. 
*/ struct pthread * thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; struct pthread *pthread, *pthread_next; struct pthread *suspended_thread, *signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. 
*/ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && - !sigismember(&pthread->sigmask, sig)) { + !sigismember(&pthread->sigmask, sig) && + ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) == 0)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. 
*/ return (NULL); } void _thread_sig_check_pending(struct pthread *pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { struct pthread *pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { struct pthread *pthread, *pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. 
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args) { int restart; int suppress_handler = 0; int thread_is_active = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* Make sure this signal isn't still in the pending set: */ sigdelset(&pthread->sigpend, sig); /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. 
*/ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); else /* * This thread is running; avoid placing it in * the run queue: */ thread_is_active = 1; break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; case PS_JOIN: /* * Remove the thread from the wait queue. 
It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. 
Raise the priority and place - * the thread in the run queue. + * the thread in the run queue. It is also possible + * for a signal to be sent to a suspended thread, + * mostly via pthread_kill(). If a thread is suspended, + * don't insert it into the priority queue; just set + * its state to suspended and it will run the signal + * handler when it is resumed. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; - if (thread_is_active == 0) + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + else if (thread_is_active == 0) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(struct pthread *pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(struct pthread *pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (sigismember(&pthread->sigmask, sig)) /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); else if (pthread == curthread) /* Call the signal 
handler for the current thread: */ thread_sig_invoke_handler(sig, NULL, NULL); else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* * We're coming from the kernel scheduler; clear the in * scheduler flag: */ _thread_kern_in_sched = 0; /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Dispatch the signal via the custom signal handler: */ if (psf->sig_has_args == 0) thread_sig_invoke_handler(psf->signo, NULL, NULL); else thread_sig_invoke_handler(psf->signo, &psf->siginfo, &psf->uc); /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(struct pthread *thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp; /* Get the top of the threads stack: */ stackp = GET_STACK_JB(thread->ctx.jb); /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup 
the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&thread->ctx, &psf->ctx, sizeof(thread->ctx)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&psf->ctx, &thread->ctx, sizeof(thread->ctx)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } diff --git a/lib/libkse/thread/thr_single_np.c 
b/lib/libkse/thread/thr_single_np.c index 85471b8cf5c1..1ee5e7918bd9 100644 --- a/lib/libkse/thread/thr_single_np.c +++ b/lib/libkse/thread/thr_single_np.c @@ -1,47 +1,49 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_single_np, pthread_single_np); int _pthread_single_np() { - struct pthread *curthread = _get_curthread(); /* Enter single-threaded (non-POSIX) scheduling mode: */ - _thread_single = curthread; - return(0); + pthread_suspend_all_np(); + /* + * XXX - Do we want to do this? + * __is_threaded = 0; + */ + return (0); } diff --git a/lib/libkse/thread/thr_spinlock.c b/lib/libkse/thread/thr_spinlock.c index 73337094d431..e05aa4a5fc0b 100644 --- a/lib/libkse/thread/thr_spinlock.c +++ b/lib/libkse/thread/thr_spinlock.c @@ -1,111 +1,111 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include "pthread_private.h" /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. */ void _spinlock(spinlock_t *lck) { struct pthread *curthread = _get_curthread(); /* * Try to grab the lock and loop if another thread grabs * it before we do. */ while(_atomic_lock(&lck->access_lock)) { /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; } /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. * * This function checks if the running thread has already locked the * location, warns if this occurs and creates a thread dump before * returning. */ void _spinlock_debug(spinlock_t *lck, char *fname, int lineno) { struct pthread *curthread = _get_curthread(); int cnt = 0; /* * Try to grab the lock and loop if another thread grabs * it before we do. 
*/ while(_atomic_lock(&lck->access_lock)) { cnt++; if (cnt > 100) { char str[256]; - snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", _getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); + snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); __sys_write(2,str,strlen(str)); __sleep(1); cnt = 0; } /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; lck->fname = fname; lck->lineno = lineno; } diff --git a/lib/libkse/thread/thr_suspend_np.c b/lib/libkse/thread/thr_suspend_np.c index 0e272ff11d55..952baa350ec5 100644 --- a/lib/libkse/thread/thr_suspend_np.c +++ b/lib/libkse/thread/thr_suspend_np.c @@ -1,161 +1,104 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" -static void finish_suspension(void *arg); +static void suspend_common(struct pthread *thread); __weak_reference(_pthread_suspend_np, pthread_suspend_np); +__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np); /* Suspend a thread: */ int _pthread_suspend_np(pthread_t thread) { int ret; + /* Suspending the current thread doesn't make sense. 
*/ + if (thread == _get_curthread()) + ret = EDEADLK; + /* Find the thread in the list of active threads: */ - if ((ret = _find_thread(thread)) == 0) { + else if ((ret = _find_thread(thread)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - switch (thread->state) { - case PS_RUNNING: - /* - * Remove the thread from the priority queue and - * set the state to suspended: - */ - PTHREAD_PRIOQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_SPINBLOCK: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - /* - * Remove these threads from the work queue - * and mark the operation as interrupted: - */ - if ((thread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - _thread_seterrno(thread,EINTR); - - /* FALLTHROUGH */ - case PS_SLEEP_WAIT: - thread->interrupted = 1; - - /* FALLTHROUGH */ - case PS_SIGTHREAD: - case PS_WAIT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - /* - * Remove these threads from the waiting queue and - * set their state to suspended: - */ - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_MUTEX_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_MUTEX_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_COND_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_COND_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_JOIN: - /* Mark the thread as suspended and joining: */ - thread->suspended = SUSP_JOIN; - - PTHREAD_NEW_STATE(thread, PS_SUSPENDED); - break; - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FILE_WAIT: - /* Mark the thread as suspended: */ - thread->suspended = SUSP_YES; - - /* - * Threads in these states may be in queues. 
- * In order to preserve queue integrity, the - * cancelled thread must remove itself from the - * queue. Mark the thread as interrupted and - * set the state to running. When the thread - * resumes, it will remove itself from the queue - * and call the suspension completion routine. - */ - thread->interrupted = 1; - _thread_seterrno(thread, EINTR); - PTHREAD_NEW_STATE(thread, PS_RUNNING); - thread->continuation = finish_suspension; - break; - - case PS_DEAD: - case PS_DEADLOCK: - case PS_STATE_MAX: - case PS_SUSPENDED: - /* Nothing needs to be done: */ - break; - } + suspend_common(thread); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } - return(ret); + return (ret); } -static void -finish_suspension(void *arg) +void +_pthread_suspend_all_np(void) { struct pthread *curthread = _get_curthread(); + struct pthread *thread; - if (curthread->suspended != SUSP_NO) - _thread_kern_sched_state(PS_SUSPENDED, __FILE__, __LINE__); -} + /* + * Defer signals to protect the scheduling queues from + * access by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if (thread != curthread) + suspend_common(thread); + } + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); +} +void +suspend_common(struct pthread *thread) +{ + thread->flags |= PTHREAD_FLAGS_SUSPENDED; + if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) { + PTHREAD_PRIOQ_REMOVE(thread); + PTHREAD_SET_STATE(thread, PS_SUSPENDED); + } +} diff --git a/lib/libpthread/thread/thr_cancel.c b/lib/libpthread/thread/thr_cancel.c index b6b070f0549d..d9324abf01aa 100644 --- a/lib/libpthread/thread/thr_cancel.c +++ b/lib/libpthread/thread/thr_cancel.c @@ -1,245 +1,231 @@ /* * David Leonard , 1999. Public domain. 
* $FreeBSD$ */ #include #include #include "pthread_private.h" static void finish_cancellation(void *arg); __weak_reference(_pthread_cancel, pthread_cancel); __weak_reference(_pthread_setcancelstate, pthread_setcancelstate); __weak_reference(_pthread_setcanceltype, pthread_setcanceltype); __weak_reference(_pthread_testcancel, pthread_testcancel); int _pthread_cancel(pthread_t pthread) { int ret; if ((ret = _find_thread(pthread)) != 0) { /* NOTHING */ } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK || (pthread->flags & PTHREAD_EXITING) != 0) { ret = 0; } else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) || (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) && ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0))) /* Just mark it for cancellation: */ pthread->cancelflags |= PTHREAD_CANCELLING; else { /* * Check if we need to kick it back into the * run queue: */ switch (pthread->state) { case PS_RUNNING: /* No need to resume: */ pthread->cancelflags |= PTHREAD_CANCELLING; break; case PS_SPINBLOCK: case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* Remove these threads from the work queue: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); /* Fall through: */ case PS_SIGTHREAD: case PS_SLEEP_WAIT: case PS_WAIT_WAIT: case PS_SIGSUSPEND: case PS_SIGWAIT: /* Interrupt and resume: */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread,PS_RUNNING); break; case PS_JOIN: /* * Disconnect the thread from the joinee: */ if (pthread->join_status.thread != NULL) { pthread->join_status.thread->joiner = NULL; pthread->join_status.thread = NULL; } pthread->cancelflags |= PTHREAD_CANCELLING; PTHREAD_NEW_STATE(pthread, PS_RUNNING); break; case PS_SUSPENDED: - if (pthread->suspended == SUSP_NO || - pthread->suspended == SUSP_YES || - pthread->suspended == SUSP_JOIN || - 
pthread->suspended == SUSP_NOWAIT) { - /* - * This thread isn't in any scheduling - * queues; just change it's state: - */ - pthread->cancelflags |= - PTHREAD_CANCELLING; - PTHREAD_SET_STATE(pthread, PS_RUNNING); - break; - } - /* FALLTHROUGH */ case PS_MUTEX_WAIT: case PS_COND_WAIT: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: /* * Threads in these states may be in queues. * In order to preserve queue integrity, the * cancelled thread must remove itself from the * queue. Mark the thread as interrupted and * needing cancellation, and set the state to * running. When the thread resumes, it will * remove itself from the queue and call the * cancellation completion routine. */ pthread->interrupted = 1; pthread->cancelflags |= PTHREAD_CANCEL_NEEDED; - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_NEW_STATE(pthread, PS_RUNNING); pthread->continuation = finish_cancellation; break; case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: /* Ignore - only here to silence -Wall: */ break; } } /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); ret = 0; } return (ret); } int _pthread_setcancelstate(int state, int *oldstate) { struct pthread *curthread = _get_curthread(); int ostate; int ret; ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE; switch (state) { case PTHREAD_CANCEL_ENABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE; if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0) pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DISABLE: if (oldstate != NULL) *oldstate = ostate; curthread->cancelflags |= PTHREAD_CANCEL_DISABLE; ret = 0; break; default: ret = EINVAL; } return (ret); } int _pthread_setcanceltype(int type, int *oldtype) { struct pthread *curthread = _get_curthread(); int otype; int ret; otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS; switch (type) { case PTHREAD_CANCEL_ASYNCHRONOUS: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags |= 
PTHREAD_CANCEL_ASYNCHRONOUS; pthread_testcancel(); ret = 0; break; case PTHREAD_CANCEL_DEFERRED: if (oldtype != NULL) *oldtype = otype; curthread->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS; ret = 0; break; default: ret = EINVAL; } return (ret); } void _pthread_testcancel(void) { struct pthread *curthread = _get_curthread(); if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) && ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) && ((curthread->flags & PTHREAD_EXITING) == 0)) { /* * It is possible for this thread to be swapped out * while performing cancellation; do not allow it * to be cancelled again. */ curthread->cancelflags &= ~PTHREAD_CANCELLING; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); PANIC("cancel"); } } void _thread_enter_cancellation_point(void) { struct pthread *curthread = _get_curthread(); /* Look for a cancellation before we block: */ pthread_testcancel(); curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT; } void _thread_leave_cancellation_point(void) { struct pthread *curthread = _get_curthread(); curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT; /* Look for a cancellation after we unblock: */ pthread_testcancel(); } static void finish_cancellation(void *arg) { struct pthread *curthread = _get_curthread(); curthread->continuation = NULL; curthread->interrupted = 0; if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED; _thread_exit_cleanup(); pthread_exit(PTHREAD_CANCELED); } } diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c index 7f3fe7acb2dd..cb45725531d0 100644 --- a/lib/libpthread/thread/thr_cond.c +++ b/lib/libpthread/thread/thr_cond.c @@ -1,747 +1,735 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* * Prototypes */ static inline pthread_t cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); __weak_reference(_pthread_cond_init, pthread_cond_init); __weak_reference(_pthread_cond_destroy, pthread_cond_destroy); __weak_reference(_pthread_cond_wait, pthread_cond_wait); __weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait); __weak_reference(_pthread_cond_signal, pthread_cond_signal); __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast); /* Reinitialize a condition variable to defaults. */ int _cond_reinit(pthread_cond_t *cond) { int ret = 0; if (cond == NULL) ret = EINVAL; else if (*cond == NULL) ret = pthread_cond_init(cond, NULL); else { /* * Initialize the condition variable structure: */ TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags = COND_FLAGS_INITED; (*cond)->c_type = COND_TYPE_FAST; (*cond)->c_mutex = NULL; (*cond)->c_seqno = 0; memset(&(*cond)->lock, 0, sizeof((*cond)->lock)); } return (ret); } int _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr) { enum pthread_cond_type type; pthread_cond_t pcond; int rval = 0; if (cond == NULL) rval = EINVAL; else { /* * Check if a pointer to a condition variable attribute * structure was passed by the caller: */ if (cond_attr != NULL && *cond_attr != NULL) { /* Default to a fast condition variable: */ type = (*cond_attr)->c_type; } else { /* Default to a fast condition variable: */ type = COND_TYPE_FAST; } /* Process according to condition variable type: */ switch (type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Nothing to do here. 
*/ break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Check for no errors: */ if (rval == 0) { if ((pcond = (pthread_cond_t) malloc(sizeof(struct pthread_cond))) == NULL) { rval = ENOMEM; } else { /* * Initialise the condition variable * structure: */ TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; pcond->c_mutex = NULL; pcond->c_seqno = 0; memset(&pcond->lock,0,sizeof(pcond->lock)); *cond = pcond; } } } /* Return the completion status: */ return (rval); } int _pthread_cond_destroy(pthread_cond_t *cond) { int rval = 0; if (cond == NULL || *cond == NULL) rval = EINVAL; else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * Free the memory allocated for the condition * variable structure: */ free(*cond); /* * NULL the caller's pointer now that the condition * variable has been destroyed: */ *cond = NULL; } /* Return the completion status: */ return (rval); } int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (cond == NULL) return (EINVAL); /* * If the condition variable is statically initialized, * perform the dynamic initialization: */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. 
*/ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. */ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return invalid argument error: */ rval = EINVAL; } else { /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Wait forever: */ curthread->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. 
*/ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. */ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; int interrupted = 0; int seqno; _thread_enter_cancellation_point(); if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) return (EINVAL); /* * If the condition variable is statically initialized, perform dynamic * initialization. */ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) return (rval); /* * Enter a loop waiting for a condition signal or broadcast * to wake up this thread. A loop is needed in case the waiting * thread is interrupted by a signal to execute a signal handler. * It is not (currently) possible to remain in the waiting queue * while running a handler. Instead, the thread is interrupted * and backed out of the waiting queue prior to executing the * signal handler. */ do { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* * If the condvar was statically allocated, properly * initialize the tail queue. 
*/ if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) { TAILQ_INIT(&(*cond)->c_queue); (*cond)->c_flags |= COND_FLAGS_INITED; } /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* Set the wakeup time: */ curthread->wakeup_time.tv_sec = abstime->tv_sec; curthread->wakeup_time.tv_nsec = abstime->tv_nsec; /* Reset the timeout and interrupted flags: */ curthread->timeout = 0; curthread->interrupted = 0; /* * Queue the running thread for the condition * variable: */ cond_queue_enq(*cond, curthread); /* Remember the mutex and sequence number: */ (*cond)->c_mutex = *mutex; seqno = (*cond)->c_seqno; /* Unlock the mutex: */ if ((rval = _mutex_cv_unlock(mutex)) != 0) { /* * Cannot unlock the mutex, so remove * the running thread from the condition * variable queue: */ cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { /* * Schedule the next thread and unlock * the condition variable structure: */ _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); done = (seqno != (*cond)->c_seqno); interrupted = curthread->interrupted; /* * Check if the wait was interrupted * (canceled) or needs to be resumed * after handling a signal. */ if (interrupted != 0) { /* * Lock the mutex and ignore any * errors. Note that even * though this thread may have * been canceled, POSIX requires * that the mutex be reaquired * prior to cancellation. */ (void)_mutex_cv_lock(mutex); } else { /* * Lock the condition variable * while removing the thread. 
*/ _SPINLOCK(&(*cond)->lock); cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; _SPINUNLOCK(&(*cond)->lock); /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); /* * Return ETIMEDOUT if the wait * timed out and there wasn't an * error locking the mutex: */ if ((curthread->timeout != 0) && rval == 0) rval = ETIMEDOUT; } } } break; /* Trap invalid condition variable types: */ default: /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* Return an invalid argument error: */ rval = EINVAL; break; } if ((interrupted != 0) && (curthread->continuation != NULL)) curthread->continuation((void *) curthread); } while ((done == 0) && (rval == 0)); _thread_leave_cancellation_point(); /* Return the completion status: */ return (rval); } int _pthread_cond_signal(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; if ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } int _pthread_cond_broadcast(pthread_cond_t * cond) { int rval = 0; pthread_t pthread; if (cond == NULL) rval = EINVAL; /* * If the condition variable is statically initialized, perform dynamic * initialization. */ else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); /* Process according to condition variable type: */ switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: /* Increment the sequence number: */ (*cond)->c_seqno++; /* * Enter a loop to bring all threads off the * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { /* - * Unless the thread is currently suspended, - * allow it to run. If the thread is suspended, - * make a note that the thread isn't in a wait - * queue any more. 
+ * Wake up the signaled thread: */ - if (pthread->state != PS_SUSPENDED) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); - else - pthread->suspended = SUSP_NOWAIT; + PTHREAD_NEW_STATE(pthread, PS_RUNNING); } /* There are no more waiting threads: */ (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ default: /* Return an invalid argument error: */ rval = EINVAL; break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (rval); } void _cond_wait_backout(pthread_t pthread) { pthread_cond_t cond; cond = pthread->data.cond; if (cond != NULL) { /* * Defer signals to protect the scheduling queues * from access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the condition variable structure: */ _SPINLOCK(&cond->lock); /* Process according to condition variable type: */ switch (cond->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: cond_queue_remove(cond, pthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&cond->c_queue) == NULL) cond->c_mutex = NULL; break; default: break; } /* Unlock the condition variable structure: */ _SPINUNLOCK(&cond->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } } /* * Dequeue a waiting thread from the head of a condition queue in * descending priority order. */ static inline pthread_t cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; if ((pthread->timeout == 0) && (pthread->interrupted == 0)) /* * Only exit the loop when we find a thread * that hasn't timed out or been canceled; * those threads are already running and don't * need their run state changed. 
*/ break; } return(pthread); } /* * Remove a waiting thread from a condition queue in descending priority * order. */ static inline void cond_queue_remove(pthread_cond_t cond, pthread_t pthread) { /* * Because pthread_cond_timedwait() can timeout as well * as be signaled by another thread, it is necessary to * guard against removing the thread from the queue if * it isn't in the queue. */ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) { TAILQ_REMOVE(&cond->c_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; } } /* * Enqueue a waiting thread to a condition queue in descending priority * order. */ static inline void cond_queue_enq(pthread_cond_t cond, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe); else { tid = TAILQ_FIRST(&cond->c_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_CONDQ; pthread->data.cond = cond; } diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c index c9513cfac15a..fd90e2959077 100644 --- a/lib/libpthread/thread/thr_exit.c +++ b/lib/libpthread/thread/thr_exit.c @@ -1,241 +1,227 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include "pthread_private.h" #define FLAGS_IN_SCHEDQ \ (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ) __weak_reference(_pthread_exit, pthread_exit); void _exit(int status) { int flags; int i; struct itimerval itimer; /* Disable the interval timer: */ itimer.it_interval.tv_sec = 0; itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ __sys_close(_thread_kern_pipe[0]); __sys_close(_thread_kern_pipe[1]); /* * Enter a loop to set all file descriptors to blocking * if they were not created as non-blocking: */ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file descriptor is in use: */ if (_thread_fd_table[i] != NULL && !(_thread_fd_table[i]->flags & O_NONBLOCK)) { /* Get the current flags: */ flags = __sys_fcntl(i, F_GETFL, NULL); /* Clear the nonblocking file descriptor flag: */ __sys_fcntl(i, F_SETFL, flags & ~O_NONBLOCK); } } /* Call the _exit syscall: */ __sys_exit(status); } void _thread_exit(char *fname, int lineno, char *string) { char s[256]; /* Prepare an error message string: */ snprintf(s, sizeof(s), "Fatal error '%s' at line %d in file %s (errno = %d)\n", string, lineno, fname, errno); /* Write the string to the standard error file descriptor: */ __sys_write(2, s, strlen(s)); /* Force this process to exit: */ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */ #if defined(_PTHREADS_INVARIANTS) abort(); #else __sys_exit(1); #endif } /* * Only called when a thread is cancelled. It may be more useful * to call it from pthread_exit() if other ways of asynchronous or * abnormal thread termination can be found. 
*/ void _thread_exit_cleanup(void) { struct pthread *curthread = _get_curthread(); /* * POSIX states that cancellation/termination of a thread should * not release any visible resources (such as mutexes) and that * it is the applications responsibility. Resources that are * internal to the threads library, including file and fd locks, * are not visible to the application and need to be released. */ /* Unlock all owned fd locks: */ _thread_fd_unlock_owned(curthread); /* Unlock all private mutexes: */ _mutex_unlock_private(curthread); /* * This still isn't quite correct because we don't account * for held spinlocks (see libc/stdlib/malloc.c). */ } void _pthread_exit(void *status) { struct pthread *curthread = _get_curthread(); pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((curthread->flags & PTHREAD_EXITING) != 0) { char msg[128]; snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread); PANIC(msg); } /* Flag this thread as exiting: */ curthread->flags |= PTHREAD_EXITING; /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { pthread_cleanup_pop(1); } if (curthread->attr.cleanup_attr != NULL) { curthread->attr.cleanup_attr(curthread->attr.arg_attr); } /* Check if there is thread specific data: */ if (curthread->specific != NULL) { /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } /* Free thread-specific poll_data structure, if allocated: */ if (curthread->poll_data.fds != NULL) { free(curthread->poll_data.fds); curthread->poll_data.fds = NULL; } /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ if (pthread_mutex_lock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); /* Add this thread to the list of dead threads. 
*/ TAILQ_INSERT_HEAD(&_dead_list, curthread, dle); /* * Signal the garbage collector thread that there is something * to clean up. */ if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); /* * Avoid a race condition where a scheduling signal can occur * causing the garbage collector thread to run. If this happens, * the current thread can be cleaned out from under us. */ _thread_kern_sig_defer(); /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot unlock gc mutex"); /* Check if there is a thread joining this one: */ if (curthread->joiner != NULL) { pthread = curthread->joiner; curthread->joiner = NULL; - switch (pthread->suspended) { - case SUSP_JOIN: - /* - * The joining thread is suspended. Change the - * suspension state to make the thread runnable when it - * is resumed: - */ - pthread->suspended = SUSP_NO; - break; - case SUSP_NO: - /* Make the joining thread runnable: */ - PTHREAD_NEW_STATE(pthread, PS_RUNNING); - break; - default: - PANIC("Unreachable code reached"); - } + /* Make the joining thread runnable: */ + PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Set the return value for the joining thread: */ pthread->join_status.ret = curthread->ret; pthread->join_status.error = 0; pthread->join_status.thread = NULL; /* Make this thread collectable by the garbage collector. */ PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) == 0), "Cannot join a detached thread"); curthread->attr.flags |= PTHREAD_DETACHED; } /* Remove this thread from the thread list: */ TAILQ_REMOVE(&_thread_list, curthread, tle); /* This thread will never be re-scheduled. */ _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); /* This point should not be reached. 
*/ PANIC("Dead thread has resumed"); } diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c index 2790748fd53b..74db740a07b8 100644 --- a/lib/libpthread/thread/thr_init.c +++ b/lib/libpthread/thread/thr_init.c @@ -1,486 +1,500 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "pthread_private.h" /* * All weak references used within libc should be in this table. * This will is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; int _pthread_guard_default; int _pthread_page_size; /* * Threaded process initialization */ void _thread_init(void) { int fd; int flags; int i; size_t len; int mib[2]; int sched_stack_size; /* Size of scheduler stack. */ struct clockinfo clockinfo; struct sigaction act; _pthread_page_size = getpagesize(); _pthread_guard_default = getpagesize(); sched_stack_size = getpagesize(); pthread_attr_default.guardsize_attr = _pthread_guard_default; /* Check if this function has already been called: */ if (_thread_initial) /* Only initialise the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if (getpid() == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); - if (__sys_ioctl(fd,TIOCSCTTY, (char *) NULL) == -1) + if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); - if (__sys_dup2(fd,0) == -1 || - __sys_dup2(fd,1) == -1 || - __sys_dup2(fd,2) == -1) + if (__sys_dup2(fd, 0) == -1 || + __sys_dup2(fd, 1) == -1 || + __sys_dup2(fd, 2) == -1) PANIC("Can't dup2"); } /* Get the standard I/O flags before messing with them : */ - for (i = 0; i < 3; i++) + for (i = 0; i < 3; i++) { if (((_pthread_stdio_flags[i] = - __sys_fcntl(i,F_GETFL, NULL)) == -1) && + __sys_fcntl(i, F_GETFL, NULL)) == -1) && (errno != EBADF)) PANIC("Cannot get stdio flags"); + } /* * Create a pipe that is written to by the signal handler to prevent * signals being missed in calls to _select: */ if (__sys_pipe(_thread_kern_pipe) != 0) { /* Cannot create pipe, so abort: */ PANIC("Cannot create kernel pipe"); } + + /* + * Make sure the pipe does not get in the way of stdio: + */ + for (i = 0; i < 2; i++) { + if (_thread_kern_pipe[i] < 3) { + fd = __sys_fcntl(_thread_kern_pipe[i], F_DUPFD, 3); + if (fd == -1) + PANIC("Cannot create kernel pipe"); + __sys_close(_thread_kern_pipe[i]); + _thread_kern_pipe[i] = fd; + } + } /* Get the flags for the read pipe: */ - else if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { + if ((flags = __sys_fcntl(_thread_kern_pipe[0], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel read pipe flags"); } /* Make the read pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[0], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot make kernel read pipe non-blocking"); } /* Get the flags for the write pipe: */ else if ((flags = 
__sys_fcntl(_thread_kern_pipe[1], F_GETFL, NULL)) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Make the write pipe non-blocking: */ else if (__sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ PANIC("Cannot get kernel write pipe flags"); } /* Allocate and initialize the ready queue: */ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) != 0) { /* Abort this application: */ PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { /* * Insufficient memory to initialise this application, so * abort: */ PANIC("Cannot allocate memory for initial thread"); } /* Allocate memory for the scheduler stack: */ else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL) PANIC("Failed to allocate stack for scheduler"); else { /* Zero the global kernel thread structure: */ memset(&_thread_kern_thread, 0, sizeof(struct pthread)); _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE; memset(_thread_initial, 0, sizeof(struct pthread)); /* Initialize the waiting and work queues: */ TAILQ_INIT(&_waitingq); TAILQ_INIT(&_workq); /* Initialize the scheduling switch hook routine: */ _sched_switch_hook = NULL; /* Give this thread default attributes: */ memcpy((void *) &_thread_initial->attr, &pthread_attr_default, sizeof(struct pthread_attr)); /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) _usrstack = (void *)USRSTACK; /* * Create a red zone below the main stack. All other stacks are * constrained to a maximum size by the paramters passed to * mmap(), but this stack is only limited by resource limits, so * this stack needs an explicitly mapped red zone to protect the * thread stack that is just beyond. 
*/ if (mmap(_usrstack - PTHREAD_STACK_INITIAL - _pthread_guard_default, _pthread_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Set the main thread stack pointer. */ _thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL; /* Set the stack attributes: */ _thread_initial->attr.stackaddr_attr = _thread_initial->stack; _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL; /* Setup the context for the scheduler: */ _setjmp(_thread_kern_sched_jb); SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack + sched_stack_size - sizeof(double)); SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler); /* * Write a magic value to the thread structure * to help identify valid ones: */ _thread_initial->magic = PTHREAD_MAGIC; /* Set the initial cancel state */ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; /* Set the name of the thread: */ _thread_initial->name = strdup("_thread_initial"); /* Initialize joiner to NULL (no joiner): */ _thread_initial->joiner = NULL; /* Initialize the owned mutex queue and count: */ TAILQ_INIT(&(_thread_initial->mutexq)); _thread_initial->priority_mutex_count = 0; /* Initialize the global scheduling time: */ _sched_ticks = 0; gettimeofday((struct timeval *) &_sched_tod, NULL); /* Initialize last active: */ _thread_initial->last_active = (long) _sched_ticks; /* Initialize the initial context: */ _thread_initial->curframe = NULL; /* Initialise the rest of the fields: */ _thread_initial->poll_data.nfds = 0; _thread_initial->poll_data.fds = NULL; _thread_initial->sig_defer_count = 0; _thread_initial->yield_on_sig_undefer = 0; 
_thread_initial->specific = NULL; _thread_initial->cleanup = NULL; _thread_initial->flags = 0; _thread_initial->error = 0; TAILQ_INIT(&_thread_list); TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle); _set_curthread(_thread_initial); /* Initialise the global signal action structure: */ sigfillset(&act.sa_mask); act.sa_handler = (void (*) ()) _thread_sig_handler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; /* Clear pending signals for the process: */ sigemptyset(&_process_sigpending); /* Clear the signal queue: */ memset(_thread_sigq, 0, sizeof(_thread_sigq)); /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for the most important * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ if (__sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || __sys_sigaction(SIGINFO, &act, NULL) != 0 || __sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ PANIC("Cannot initialise signal handler"); } _thread_sigact[_SCHED_SIGNAL - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO; _thread_sigact[SIGCHLD - 1].sa_flags = SA_SIGINFO; /* Get the process signal mask: */ __sys_sigprocmask(SIG_SETMASK, NULL, &_process_sigmask); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ? 
clockinfo.tick : CLOCK_RES_USEC_MIN; /* Get the table size: */ if ((_thread_dtablesize = getdtablesize()) < 0) { /* * Cannot get the system defined table size, so abort * this process. */ PANIC("Cannot get dtablesize"); } /* Allocate memory for the file descriptor table: */ if ((_thread_fd_table = (struct fd_table_entry **) malloc(sizeof(struct fd_table_entry *) * _thread_dtablesize)) == NULL) { /* Avoid accesses to file descriptor table on exit: */ _thread_dtablesize = 0; /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for file descriptor table"); } /* Allocate memory for the pollfd table: */ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) { /* * Cannot allocate memory for the file descriptor * table, so abort this process. */ PANIC("Cannot allocate memory for pollfd table"); } else { /* * Enter a loop to initialise the file descriptor * table: */ for (i = 0; i < _thread_dtablesize; i++) { /* Initialise the file descriptor table: */ _thread_fd_table[i] = NULL; } /* Initialize stdio file descriptor table entries: */ for (i = 0; i < 3; i++) { if ((_thread_fd_table_init(i) != 0) && (errno != EBADF)) PANIC("Cannot initialize stdio file " "descriptor table entry"); } } } /* Initialise the garbage collector mutex and condition variable. 
*/ if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 || pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); } /* * Special start up code for NetBSD/Alpha */ #if defined(__NetBSD__) && defined(__alpha__) int main(int argc, char *argv[], char *env); int _thread_main(int argc, char *argv[], char *env) { _thread_init(); return (main(argc, argv, env)); } #endif diff --git a/lib/libpthread/thread/thr_multi_np.c b/lib/libpthread/thread/thr_multi_np.c index c1a069f11ce5..bd42365621a6 100644 --- a/lib/libpthread/thread/thr_multi_np.c +++ b/lib/libpthread/thread/thr_multi_np.c @@ -1,46 +1,50 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_multi_np, pthread_multi_np); int _pthread_multi_np() { + /* Return to multi-threaded scheduling mode: */ - _thread_single = NULL; - return(0); + /* + * XXX - Do we want to do this? + * __is_threaded = 1; + */ + pthread_resume_all_np(); + return (0); } diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c index 0f67b4b01965..86e0b8bf324c 100644 --- a/lib/libpthread/thread/thr_mutex.c +++ b/lib/libpthread/thread/thr_mutex.c @@ -1,1576 +1,1544 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include "pthread_private.h" #if defined(_PTHREADS_INVARIANTS) #define _MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ (m)->m_qe.tqe_next = NULL; \ } while (0) #define _MUTEX_ASSERT_IS_OWNED(m) do { \ if ((m)->m_qe.tqe_prev == NULL) \ PANIC("mutex is not on list"); \ } while (0) #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ if (((m)->m_qe.tqe_prev != NULL) || \ ((m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else #define _MUTEX_INIT_LINK(m) #define _MUTEX_ASSERT_IS_OWNED(m) #define _MUTEX_ASSERT_NOT_OWNED(m) #endif /* * Prototypes */ static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); static void mutex_priority_adjust(pthread_mutex_t); static void mutex_rescan_owned (pthread_t, pthread_mutex_t); static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); 
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; static struct pthread_mutex_attr static_mutex_attr = PTHREAD_MUTEXATTR_STATIC_INITIALIZER; static pthread_mutexattr_t static_mattr = &static_mutex_attr; /* Single underscore versions provided for libc internal usage: */ __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); /* No difference between libc and application usage of these: */ __weak_reference(_pthread_mutex_init, pthread_mutex_init); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); /* Reinitialize a mutex to defaults. */ int _mutex_reinit(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; else if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else { /* * Initialize the mutex structure: */ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; (*mutex)->m_protocol = PTHREAD_PRIO_NONE; TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; _MUTEX_INIT_LINK(*mutex); memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); } return (ret); } int _pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { enum pthread_mutextype type; int protocol; int ceiling; int flags; pthread_mutex_t pmutex; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; flags = 0; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= 
MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; flags = (*mutex_attr)->m_flags; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Set the mutex flags: */ pmutex->m_flags = flags; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. */ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); memset(&pmutex->lock, 0, sizeof(pmutex->lock)); *mutex = pmutex; } else { free(pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); } int _pthread_mutex_destroy(pthread_mutex_t * mutex) { int ret = 0; if (mutex == NULL || *mutex == NULL) ret = EINVAL; else { /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * Check to see if this mutex is in use: */ if (((*mutex)->m_owner != NULL) || (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || ((*mutex)->m_refcount != 0)) { ret = EBUSY; /* Unlock the mutex structure: */ 
_SPINUNLOCK(&(*mutex)->lock); } else { /* * Free the memory allocated for the mutex * structure: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); free(*mutex); /* * Leave the caller's pointer NULL now that * the mutex has been destroyed: */ *mutex = NULL; } } /* Return the completion status: */ return (ret); } static int init_static(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, NULL); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int init_static_private(pthread_mutex_t *mutex) { int ret; _SPINLOCK(&static_init_lock); if (*mutex == NULL) ret = pthread_mutex_init(mutex, &static_mattr); else ret = 0; _SPINUNLOCK(&static_init_lock); return(ret); } static int mutex_trylock_common(pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. 
*/ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. 
*/ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); /* Return the completion status: */ return (ret); } int __pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } int _pthread_mutex_trylock(pthread_mutex_t *mutex) { int ret = 0; if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking the mutex private (delete safe): */ else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0) ret = mutex_trylock_common(mutex); return (ret); } static int mutex_lock_common(pthread_mutex_t * mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in pthread_mutex_trylock_basic"); /* Reset the interrupted flag: */ curthread->interrupted = 0; /* * Enter a loop waiting to become the mutex owner. We need a * loop in case the waiting thread is interrupted by a signal * to execute a signal handler. It is not (currently) possible * to remain in the waiting queue while running a handler. 
* Instead, the thread is interrupted and backed out of the * waiting queue prior to executing the signal handler. */ do { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; _MUTEX_INIT_LINK(*mutex); } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for this thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on attributes of the * running thread when there are no waiters. 
*/ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; if (curthread->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); } break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* * Lock the mutex for the running * thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority: */ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_lock(*mutex); else { /* * Join the queue of threads waiting to lock * the mutex: */ mutex_queue_enq(*mutex, curthread); /* * Keep a pointer to the mutex this thread * is waiting on: */ curthread->data.mutex = *mutex; /* Clear any previous 
error: */ curthread->error = 0; /* * Unlock the mutex structure and schedule the * next thread: */ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, &(*mutex)->lock, __FILE__, __LINE__); /* Lock the mutex structure again: */ _SPINLOCK(&(*mutex)->lock); /* * The threads priority may have changed while * waiting for the mutex causing a ceiling * violation. */ ret = curthread->error; curthread->error = 0; } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ if (curthread->interrupted != 0) mutex_queue_remove(*mutex, curthread); /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } while (((*mutex)->m_owner != curthread) && (ret == 0) && (curthread->interrupted == 0)); if (curthread->interrupted != 0 && curthread->continuation != NULL) curthread->continuation((void *) curthread); /* Return the completion status: */ return (ret); } int __pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization: */ else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_lock(pthread_mutex_t *mutex) { int ret = 0; if (_thread_initial == NULL) _thread_init(); if (mutex == NULL) ret = EINVAL; /* * If the mutex is statically initialized, perform the dynamic * initialization marking it private (delete safe): */ else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0)) ret = mutex_lock_common(mutex); return (ret); } int _pthread_mutex_unlock(pthread_mutex_t * mutex) { return (mutex_unlock_common(mutex, /* add reference */ 0)); } int _mutex_cv_unlock(pthread_mutex_t * mutex) { 
return (mutex_unlock_common(mutex, /* add reference */ 1)); } int _mutex_cv_lock(pthread_mutex_t * mutex) { int ret; if ((ret = pthread_mutex_lock(mutex)) == 0) (*mutex)->m_refcount--; return (ret); } static inline int mutex_self_trylock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_self_lock(pthread_mutex_t mutex) { int ret = 0; switch (mutex->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: /* * POSIX specifies that mutexes should return EDEADLK if a * recursive lock is detected. */ ret = EDEADLK; break; case PTHREAD_MUTEX_NORMAL: /* * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. 
*/ _thread_kern_sched_state_unlock(PS_DEADLOCK, &mutex->lock, __FILE__, __LINE__); break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ mutex->m_data.m_count++; break; default: /* Trap invalid mutex types; */ ret = EINVAL; } return(ret); } static inline int mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) { struct pthread *curthread = _get_curthread(); int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of * threads waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { - /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. 
- */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + /* Make the new owner runnable: */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); /* * Add the mutex to the threads list of * owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. */ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Get the next thread from the queue of threads * waiting on the mutex: */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) /* This mutex has no priority. 
*/ (*mutex)->m_prio = 0; else { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Set the priority of the mutex. Since * our waiting threads are in descending * priority order, the priority of the * mutex becomes the active priority of * the thread we just dequeued. */ (*mutex)->m_prio = (*mutex)->m_owner->active_priority; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning threads inherited priority * now becomes his active priority (the * priority of the mutex). */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* POSIX priority ceiling mutex: */ case PTHREAD_PRIO_PROTECT: /* * Check if the running thread is not the owner of the * mutex: */ if ((*mutex)->m_owner != curthread) { /* * Return an invalid argument error for no * owner and a permission error otherwise: */ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; } else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && ((*mutex)->m_data.m_count > 0)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { /* * Clear the count in case this is recursive * mutex. 
*/ (*mutex)->m_data.m_count = 0; /* * Restore the threads inherited priority and * recompute the active priority (being careful * not to override changes in the threads base * priority subsequent to locking the mutex). */ curthread->inherited_priority = (*mutex)->m_saved_prio; curthread->active_priority = MAX(curthread->inherited_priority, curthread->base_priority); /* * This thread now owns one less priority mutex. */ curthread->priority_mutex_count--; /* Remove the mutex from the threads queue. */ _MUTEX_ASSERT_IS_OWNED(*mutex); TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); _MUTEX_INIT_LINK(*mutex); /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling * violation: */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && ((*mutex)->m_owner->active_priority > (*mutex)->m_prio)) { /* * Either the mutex ceiling priority * been lowered and/or this threads * priority has been raised subsequent * to this thread being queued on the * waiting list. 
*/ (*mutex)->m_owner->error = EINVAL; PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); /* * The thread is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; } /* Check for a new owner: */ if ((*mutex)->m_owner != NULL) { /* * Track number of priority mutexes owned: */ (*mutex)->m_owner->priority_mutex_count++; /* * Add the mutex to the threads list * of owned mutexes: */ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, (*mutex), m_qe); /* * The owner is no longer waiting for * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; /* * Save the owning threads inherited * priority: */ (*mutex)->m_saved_prio = (*mutex)->m_owner->inherited_priority; /* * The owning thread inherits the * ceiling priority of the mutex and * executes at that priority: */ (*mutex)->m_owner->inherited_priority = (*mutex)->m_prio; (*mutex)->m_owner->active_priority = (*mutex)->m_prio; /* - * Unless the new owner of the mutex is - * currently suspended, allow the owner - * to run. If the thread is suspended, - * make a note that the thread isn't in - * a wait queue any more. + * Make the new owner runnable: */ - if (((*mutex)->m_owner->state != - PS_SUSPENDED)) { - PTHREAD_NEW_STATE((*mutex)->m_owner, - PS_RUNNING); - } else { - (*mutex)->m_owner->suspended = - SUSP_NOWAIT; - } + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if ((ret == 0) && (add_reference != 0)) { /* Increment the reference count: */ (*mutex)->m_refcount++; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* Return the completion status: */ return (ret); } /* * This function is called when a change in base priority occurs for * a thread that is holding or waiting for a priority protection or * inheritence mutex. 
A change in a threads base priority can effect * changes to active priorities of other threads and to the ordering * of mutex locking by waiting threads. * * This must be called while thread scheduling is deferred. */ void _mutex_notify_priochange(pthread_t pthread) { /* Adjust the priorites of any owned priority mutexes: */ if (pthread->priority_mutex_count > 0) { /* * Rescan the mutexes owned by this thread and correct * their priorities to account for this threads change * in priority. This has the side effect of changing * the threads active priority. */ mutex_rescan_owned(pthread, /* rescan all owned */ NULL); } /* * If this thread is waiting on a priority inheritence mutex, * check for priority adjustments. A change in priority can * also effect a ceiling violation(*) for a thread waiting on * a priority protection mutex; we don't perform the check here * as it is done in pthread_mutex_unlock. * * (*) It should be noted that a priority change to a thread * _after_ taking and owning a priority ceiling mutex * does not affect ownership of that mutex; the ceiling * priority is only checked before mutex ownership occurs. */ if (pthread->state == PS_MUTEX_WAIT) { /* Lock the mutex structure: */ _SPINLOCK(&pthread->data.mutex->lock); /* * Check to make sure this thread is still in the same state * (the spinlock above can yield the CPU to another thread): */ if (pthread->state == PS_MUTEX_WAIT) { /* * Remove and reinsert this thread into the list of * waiting threads to preserve decreasing priority * order. */ mutex_queue_remove(pthread->data.mutex, pthread); mutex_queue_enq(pthread->data.mutex, pthread); if (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT) { /* Adjust priorities: */ mutex_priority_adjust(pthread->data.mutex); } } /* Unlock the mutex structure: */ _SPINUNLOCK(&pthread->data.mutex->lock); } } /* * Called when a new thread is added to the mutex waiting queue or * when a threads priority changes that is already in the mutex * waiting queue. 
*/ static void mutex_priority_adjust(pthread_mutex_t mutex) { pthread_t pthread_next, pthread = mutex->m_owner; int temp_prio; pthread_mutex_t m = mutex; /* * Calculate the mutex priority as the maximum of the highest * active priority of any waiting threads and the owning threads * active priority(*). * * (*) Because the owning threads current active priority may * reflect priority inherited from this mutex (and the mutex * priority may have changed) we must recalculate the active * priority based on the threads saved inherited priority * and its base priority. */ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, pthread->base_priority)); /* See if this mutex really needs adjusting: */ if (temp_prio == m->m_prio) /* No need to propagate the priority: */ return; /* Set new priority of the mutex: */ m->m_prio = temp_prio; while (m != NULL) { /* * Save the threads priority before rescanning the * owned mutexes: */ temp_prio = pthread->active_priority; /* * Fix the priorities for all the mutexes this thread has * locked since taking this mutex. This also has a * potential side-effect of changing the threads priority. */ mutex_rescan_owned(pthread, m); /* * If the thread is currently waiting on a mutex, check * to see if the threads new priority has affected the * priority of the mutex. */ if ((temp_prio != pthread->active_priority) && (pthread->state == PS_MUTEX_WAIT) && (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { /* Grab the mutex this thread is waiting on: */ m = pthread->data.mutex; /* * The priority for this thread has changed. Remove * and reinsert this thread into the list of waiting * threads to preserve decreasing priority order. 
*/ mutex_queue_remove(m, pthread); mutex_queue_enq(m, pthread); /* Grab the waiting thread with highest priority: */ pthread_next = TAILQ_FIRST(&m->m_queue); /* * Calculate the mutex priority as the maximum of the * highest active priority of any waiting threads and * the owning threads active priority. */ temp_prio = MAX(pthread_next->active_priority, MAX(m->m_saved_prio, m->m_owner->base_priority)); if (temp_prio != m->m_prio) { /* * The priority needs to be propagated to the * mutex this thread is waiting on and up to * the owner of that mutex. */ m->m_prio = temp_prio; pthread = m->m_owner; } else /* We're done: */ m = NULL; } else /* We're done: */ m = NULL; } } static void mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) { int active_prio, inherited_prio; pthread_mutex_t m; pthread_t pthread_next; /* * Start walking the mutexes the thread has taken since * taking this mutex. */ if (mutex == NULL) { /* * A null mutex means start at the beginning of the owned * mutex list. */ m = TAILQ_FIRST(&pthread->mutexq); /* There is no inherited priority yet. */ inherited_prio = 0; } else { /* * The caller wants to start after a specific mutex. It * is assumed that this mutex is a priority inheritence * mutex and that its priority has been correctly * calculated. */ m = TAILQ_NEXT(mutex, m_qe); /* Start inheriting priority from the specified mutex. */ inherited_prio = mutex->m_prio; } active_prio = MAX(inherited_prio, pthread->base_priority); while (m != NULL) { /* * We only want to deal with priority inheritence * mutexes. This might be optimized by only placing * priority inheritence mutexes into the owned mutex * list, but it may prove to be useful having all * owned mutexes in this list. Consider a thread * exiting while holding mutexes... */ if (m->m_protocol == PTHREAD_PRIO_INHERIT) { /* * Fix the owners saved (inherited) priority to * reflect the priority of the previous mutex. 
*/ m->m_saved_prio = inherited_prio; if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) /* Recalculate the priority of the mutex: */ m->m_prio = MAX(active_prio, pthread_next->active_priority); else m->m_prio = active_prio; /* Recalculate new inherited and active priorities: */ inherited_prio = m->m_prio; active_prio = MAX(m->m_prio, pthread->base_priority); } /* Advance to the next mutex owned by this thread: */ m = TAILQ_NEXT(m, m_qe); } /* * Fix the threads inherited priority and recalculate its * active priority. */ pthread->inherited_priority = inherited_prio; active_prio = MAX(inherited_prio, pthread->base_priority); if (active_prio != pthread->active_priority) { /* * If this thread is in the priority queue, it must be * removed and reinserted for its new priority. */ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { /* * Remove the thread from the priority queue * before changing its priority: */ PTHREAD_PRIOQ_REMOVE(pthread); /* * POSIX states that if the priority is being * lowered, the thread must be inserted at the * head of the queue for its priority if it owns * any priority protection or inheritence mutexes. */ if ((active_prio < pthread->active_priority) && (pthread->priority_mutex_count > 0)) { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_HEAD(pthread); } else { /* Set the new active priority. */ pthread->active_priority = active_prio; PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } else { /* Set the new active priority. 
*/ pthread->active_priority = active_prio; } } } void _mutex_unlock_private(pthread_t pthread) { struct pthread_mutex *m, *m_next; for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { m_next = TAILQ_NEXT(m, m_qe); if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) pthread_mutex_unlock(&m); } } void _mutex_lock_backout(pthread_t pthread) { struct pthread_mutex *mutex; /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { mutex = pthread->data.mutex; /* Lock the mutex structure: */ _SPINLOCK(&mutex->lock); mutex_queue_remove(mutex, pthread); /* This thread is no longer waiting for the mutex: */ pthread->data.mutex = NULL; /* Unlock the mutex structure: */ _SPINUNLOCK(&mutex->lock); } /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; /* * Only exit the loop if the thread hasn't been * cancelled. */ if (pthread->interrupted == 0) break; } return(pthread); } /* * Remove a waiting thread from a mutex queue in descending priority order. */ static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; } } /* * Enqueue a waiting thread to a queue in descending priority order. 
*/ static inline void mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) { pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); /* * For the common case of all threads having equal priority, * we perform a quick check against the priority of the thread * at the tail of the queue. */ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); else { tid = TAILQ_FIRST(&mutex->m_queue); while (pthread->active_priority <= tid->active_priority) tid = TAILQ_NEXT(tid, sqe); TAILQ_INSERT_BEFORE(tid, pthread, sqe); } pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c index 55d742b9297a..b700d97f7955 100644 --- a/lib/libpthread/thread/thr_priority_queue.c +++ b/lib/libpthread/thread/thr_priority_queue.c @@ -1,337 +1,370 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void pq_insert_prio_list(pq_queue_t *pq, int prio); #if defined(_PTHREADS_INVARIANTS) static int _pq_active = 0; #define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ) #define _PQ_SET_ACTIVE() _pq_active = 1 #define _PQ_CLEAR_ACTIVE() _pq_active = 0 #define _PQ_ASSERT_ACTIVE(msg) do { \ if (_pq_active == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_INACTIVE(msg) do { \ if (_pq_active != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \ if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \ PANIC(msg); \ } while (0) #define _PQ_ASSERT_PROTECTED(msg) \ PTHREAD_ASSERT((_thread_kern_in_sched != 0) || \ ((_get_curthread())->sig_defer_count > 0) ||\ (_sig_in_handler != 0), msg); #else #define _PQ_SET_ACTIVE() #define _PQ_CLEAR_ACTIVE() #define _PQ_ASSERT_ACTIVE(msg) #define _PQ_ASSERT_INACTIVE(msg) #define 
_PQ_ASSERT_IN_WAITQ(thrd, msg) #define _PQ_ASSERT_IN_PRIOQ(thrd, msg) #define _PQ_ASSERT_NOT_QUEUED(thrd, msg) #define _PQ_ASSERT_PROTECTED(msg) #endif int _pq_alloc(pq_queue_t *pq, int minprio, int maxprio) { int ret = 0; int prioslots = maxprio - minprio + 1; if (pq == NULL) ret = -1; /* Create the priority queue with (maxprio - minprio + 1) slots: */ else if ((pq->pq_lists = (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) ret = -1; else { /* Remember the queue size: */ pq->pq_size = prioslots; ret = _pq_init(pq); } return (ret); } int _pq_init(pq_queue_t *pq) { int i, ret = 0; if ((pq == NULL) || (pq->pq_lists == NULL)) ret = -1; else { /* Initialize the queue for each priority slot: */ for (i = 0; i < pq->pq_size; i++) { TAILQ_INIT(&pq->pq_lists[i].pl_head); pq->pq_lists[i].pl_prio = i; pq->pq_lists[i].pl_queued = 0; } /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); _PQ_CLEAR_ACTIVE(); } return (ret); } void _pq_remove(pq_queue_t *pq, pthread_t pthread) { int prio = pthread->active_priority; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue"); _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!"); /* * Remove this thread from priority list. Note that if * the priority list becomes empty, it is not removed * from the priority queue because another thread may be * added to the priority list (resulting in a needless * removal/insertion). Priority lists are only removed * from the priority queue when _pq_first is called. */ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); /* This thread is now longer in the priority queue. 
*/ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; _PQ_CLEAR_ACTIVE(); } void _pq_insert_head(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. + * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_head: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); - - TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_head: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } void _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) { - int prio = pthread->active_priority; + int prio; /* - * Make some assertions when debugging is enabled: + * Don't insert suspended threads into the priority queue. 
+ * The caller is responsible for setting the threads state. */ - _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); - _PQ_SET_ACTIVE(); - _PQ_ASSERT_NOT_QUEUED(pthread, - "_pq_insert_tail: Already in priority queue"); - _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); - - TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); - if (pq->pq_lists[prio].pl_queued == 0) - /* Insert the list into the priority queue: */ - pq_insert_prio_list(pq, prio); - - /* Mark this thread as being in the priority queue. */ - pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* Make sure the threads state is suspended. */ + if (pthread->state != PS_SUSPENDED) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + } else { + /* + * Make some assertions when debugging is enabled: + */ + _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active"); + _PQ_SET_ACTIVE(); + _PQ_ASSERT_NOT_QUEUED(pthread, + "_pq_insert_tail: Already in priority queue"); + _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!"); + + prio = pthread->active_priority; + TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); + + /* Mark this thread as being in the priority queue. */ + pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ; - _PQ_CLEAR_ACTIVE(); + _PQ_CLEAR_ACTIVE(); + } } pthread_t _pq_first(pq_queue_t *pq) { pq_list_t *pql; pthread_t pthread = NULL; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_pq_first: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!"); while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && (pthread == NULL)) { if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { /* * The priority list is empty; remove the list * from the queue. 
*/ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); /* Mark the list as not being in the queue: */ pql->pl_queued = 0; + } else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) { + /* + * This thread is suspended; remove it from the + * list and ensure its state is suspended. + */ + TAILQ_REMOVE(&pql->pl_head, pthread, pqe); + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + + /* This thread is no longer in the priority queue. */ + pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ; + pthread = NULL; } } _PQ_CLEAR_ACTIVE(); return (pthread); } static void pq_insert_prio_list(pq_queue_t *pq, int prio) { pq_list_t *pql; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active"); _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!"); /* * The priority queue is in descending priority order. Start at * the beginning of the queue and find the list before which the * new list should be inserted. */ pql = TAILQ_FIRST(&pq->pq_queue); while ((pql != NULL) && (pql->pl_prio > prio)) pql = TAILQ_NEXT(pql, pl_link); /* Insert the list: */ if (pql == NULL) TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); else TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); /* Mark this list as being in the queue: */ pq->pq_lists[prio].pl_queued = 1; } void _waitq_insert(pthread_t pthread) { pthread_t tid; /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue"); if (pthread->wakeup_time.tv_sec == -1) TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe); else { tid = TAILQ_FIRST(&_waitingq); while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) || ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) && (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec)))) tid = TAILQ_NEXT(tid, pqe); if (tid == NULL) TAILQ_INSERT_TAIL(&_waitingq,
pthread, pqe); else TAILQ_INSERT_BEFORE(tid, pthread, pqe); } pthread->flags |= PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_remove(pthread_t pthread) { /* * Make some assertions when debugging is enabled: */ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active"); _PQ_SET_ACTIVE(); _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue"); TAILQ_REMOVE(&_waitingq, pthread, pqe); pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ; _PQ_CLEAR_ACTIVE(); } void _waitq_setactive(void) { _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active"); _PQ_SET_ACTIVE(); } void _waitq_clearactive(void) { _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active"); _PQ_CLEAR_ACTIVE(); } diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h index 04023fb81a35..3fef49c7b25f 100644 --- a/lib/libpthread/thread/thr_private.h +++ b/lib/libpthread/thread/thr_private.h @@ -1,1362 +1,1339 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _PTHREAD_PRIVATE_H #define _PTHREAD_PRIVATE_H /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #else #define SCLASS extern #endif /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include /* * Define machine dependent macros to get and set the stack pointer * from the supported contexts. Also define a macro to set the return * address in a jmp_buf context. * * XXX - These need to be moved into architecture dependent support files. 
*/ #if defined(__i386__) #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk) #define FP_SAVE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("fnsave %0": :"m"(*fdata)); \ } while (0) #define FP_RESTORE_UC(ucp) do { \ char *fdata; \ fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \ __asm__("frstor %0": :"m"(*fdata)); \ } while (0) #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra) #elif defined(__alpha__) #include #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4])) #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP]) #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk) #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk) #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk) #define FP_SAVE_UC(ucp) #define FP_RESTORE_UC(ucp) #define SET_RETURN_ADDR_JB(jb, ra) do { \ (jb)[0]._jb[2] = (long)(ra); \ (jb)[0]._jb[R_RA + 4] = (long)(ra); \ (jb)[0]._jb[R_T12 + 4] = (long)(ra); \ } while (0) #else #error "Don't recognize this architecture!" #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thread_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(1, buf, strlen(buf)); \ } while (0) #define stderr_debug(args...) 
do { \ char buf[128]; \ snprintf(buf, sizeof(buf), ##args); \ __sys_write(2, buf, strlen(buf)); \ } while (0) /* * Priority queue manipulation macros (using pqe link): */ #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq) /* * Waiting queue manipulation macros (using pqe link): */ #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd) #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd) #if defined(_PTHREADS_INVARIANTS) #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive() #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive() #else #define PTHREAD_WAITQ_CLEARACTIVE() #define PTHREAD_WAITQ_SETACTIVE() #endif /* * Work queue manipulation macros (using qe link): */ #define PTHREAD_WORKQ_INSERT(thrd) do { \ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \ } while (0) #define PTHREAD_WORKQ_REMOVE(thrd) do { \ TAILQ_REMOVE(&_workq,thrd,qe); \ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \ } while (0) /* * State change macro without scheduling queue change: */ #define PTHREAD_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * State change macro with scheduling queue change - This must be * called with preemption deferred (see thread_kern_sched_[un]defer). 
*/ #if defined(_PTHREADS_INVARIANTS) #include #define PTHREAD_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \ PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \ "Illegal call from signal handler"); #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if (_thread_kern_new_state != 0) \ PANIC("Recursive PTHREAD_NEW_STATE"); \ _thread_kern_new_state = 1; \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ + PTHREAD_SET_STATE(thrd, newstate); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ _thread_kern_new_state = 0; \ - PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #else #define PTHREAD_ASSERT(cond, msg) #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) #define PTHREAD_NEW_STATE(thrd, newstate) do { \ if ((thrd)->state != newstate) { \ if ((thrd)->state == PS_RUNNING) { \ PTHREAD_PRIOQ_REMOVE(thrd); \ PTHREAD_WAITQ_INSERT(thrd); \ } else if (newstate == PS_RUNNING) { \ PTHREAD_WAITQ_REMOVE(thrd); \ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ } \ } \ PTHREAD_SET_STATE(thrd, newstate); \ } while (0) #endif /* * Define the signals to be used for scheduling. */ #if defined(_PTHREADS_COMPAT_SCHED) #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL #define _SCHED_SIGNAL SIGVTALRM #else #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #endif /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 
*/ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ } pq_queue_t; /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * Mutex definitions. */ union pthread_mutex_data { void *m_ptr; int m_count; }; struct pthread_mutex { enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. */ spinlock_t lock; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. 
*/ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; pthread_mutex_t c_mutex; void *c_data; long c_flags; int c_seqno; /* * Lock for accesses to this structure. */ spinlock_t lock; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \ 0, 0, _SPINLOCK_INITIALIZER } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. */ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define PTHREAD_CREATE_RUNNING 0 #define PTHREAD_CREATE_SUSPENDED 1 -/* - * Additional state for a thread suspended with pthread_suspend_np(). - */ -enum pthread_susp { - SUSP_NO, /* Not suspended. */ - SUSP_YES, /* Suspended. */ - SUSP_JOIN, /* Suspended, joining. 
*/ - SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */ - SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */ - SUSP_COND_WAIT /* Suspended, still in a condition queue. */ -}; - /* * Miscellaneous definitions. */ #define PTHREAD_STACK_DEFAULT 65536 /* * Size of default red zone at the end of each stack. In actuality, this "red * zone" is merely an unmapped region, except in the case of the initial stack. * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK * region, an unmapped gap between thread stacks achieves the same effect as * explicitly mapped red zones. * This is declared and initialized in uthread_init.c. */ extern int _pthread_guard_default; extern int _pthread_page_size; /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define PTHREAD_STACK_INITIAL 0x100000 /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. 
*/ #define PTHREAD_DEFAULT_PRIORITY 15 #define PTHREAD_MIN_PRIORITY 0 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */ #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */ #define PTHREAD_RT_PRIORITY 64 /* 0x40 */ #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY #define PTHREAD_LAST_PRIORITY \ (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY) #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 #define CLOCK_RES_USEC_MIN 1000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * Define a thread-safe macro to get the current time of day * which is updated at regular intervals by the scheduling signal * handler. */ #define GET_CURRENT_TOD(tv) \ do { \ tv.tv_sec = _sched_tod.tv_sec; \ tv.tv_usec = _sched_tod.tv_usec; \ } while (tv.tv_sec != _sched_tod.tv_sec) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_SIGTHREAD, PS_MUTEX_WAIT, PS_COND_WAIT, PS_FDLR_WAIT, PS_FDLW_WAIT, PS_FDR_WAIT, PS_FDW_WAIT, PS_FILE_WAIT, PS_POLL_WAIT, PS_SELECT_WAIT, PS_SLEEP_WAIT, PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; /* * File descriptor locking definitions. */ #define FD_READ 0x1 #define FD_WRITE 0x2 #define FD_RDWR (FD_READ | FD_WRITE) /* * File descriptor table structure. */ struct fd_table_entry { /* * Lock for accesses to this file descriptor table * entry. This is passed to _spinlock() to provide atomic * access to this structure. It does *not* represent the * state of the lock on the file descriptor. */ spinlock_t lock; TAILQ_HEAD(, pthread) r_queue; /* Read queue. */ TAILQ_HEAD(, pthread) w_queue; /* Write queue. 
*/ struct pthread *r_owner; /* Ptr to thread owning read lock. */ struct pthread *w_owner; /* Ptr to thread owning write lock. */ char *r_fname; /* Ptr to read lock source file name */ int r_lineno; /* Read lock source line number. */ char *w_fname; /* Ptr to write lock source file name */ int w_lineno; /* Write lock source line number. */ int r_lockcount; /* Count for FILE read locks. */ int w_lockcount; /* Count for FILE write locks. */ int flags; /* Flags used in open. */ }; struct pthread_poll_data { int nfds; struct pollfd *fds; }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ short branch; /* Line number, for debugging. */ char *fname; /* Source file name for debugging.*/ } fd; FILE *fp; struct pthread_poll_data *poll_data; spinlock_t *spinlock; struct pthread *thread; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); struct pthread_signal_frame; struct pthread_state_data { struct pthread_signal_frame *psd_curframe; sigset_t psd_sigmask; struct timespec psd_wakeup_time; union pthread_wait_data psd_wait_data; enum pthread_state psd_state; int psd_flags; int psd_interrupted; int psd_longjmp_val; int psd_sigmask_seqno; int psd_signo; int psd_sig_defer_count; /* XXX - What about thread->timeout and/or thread->error? */ }; struct join_status { struct pthread *thread; void *ret; int error; }; /* * The frame that is added to the top of a threads stack when setting up * up the thread to run a signal handler. */ struct pthread_signal_frame { /* * This stores the threads state before the signal. */ struct pthread_state_data saved_state; /* * Threads return context; we use only jmp_buf's for now. 
*/ union { jmp_buf jb; ucontext_t uc; } ctx; int signo; /* signal, arg 1 to sighandler */ int sig_has_args; /* use signal args if true */ ucontext_t uc; siginfo_t siginfo; }; struct pthread_specific_elem { const void *data; int seqno; }; /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* * Lock for accesses to this thread structure. */ spinlock_t lock; /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* Queue entry for list of dead threads: */ TAILQ_ENTRY(pthread) dle; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; void *stack; struct pthread_attr attr; /* * Threads return context; we use only jmp_buf's for now. */ union { jmp_buf jb; ucontext_t uc; } ctx; /* * Used for tracking delivery of signal handlers. */ struct pthread_signal_frame *curframe; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define PTHREAD_AT_CANCEL_POINT 0x0004 #define PTHREAD_CANCELLING 0x0008 #define PTHREAD_CANCEL_NEEDED 0x0010 int cancelflags; - enum pthread_susp suspended; - thread_continuation_t continuation; /* * Current signal mask and pending signals. */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; /* Thread state: */ enum pthread_state state; /* Scheduling clock when this thread was last made active. */ long last_active; /* Scheduling clock when this thread was last made inactive. */ long last_inactive; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. 
*/ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * o A queue of threads waiting for a file descriptor lock * o A queue of threads needing work done by the kernel thread * (waiting for a spinlock or file I/O) * * A thread can also be joining a thread (the joiner field above). * * It must not be possible for a thread to belong to any of the * above queues while it is handling a signal. Signal handlers * may longjmp back to previous stack frames circumventing normal * control flow. This could corrupt queue integrity if the thread * retains membership in the queue. Therefore, if a thread is a * member of one of these queues when a signal handler is invoked, * it must remove itself from the queue before calling the signal * handler and reinsert itself after normal return of the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex and condition variable) queue * links, and qe for all other links. */ TAILQ_ENTRY(pthread) pqe; /* priority queue link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ TAILQ_ENTRY(pthread) qe; /* all other queues link */ /* Wait data. */ union pthread_wait_data data; /* * Allocated for converting select into poll. */ struct pthread_poll_data poll_data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has deferred signals. * We allow for recursive deferral. 
*/ int sig_defer_count; /* * Set to TRUE if this thread should yield after undeferring * signals. */ int yield_on_sig_undefer; /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */ #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */ #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */ #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */ #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */ #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/ #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */ -#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */ +#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */ +#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */ #define PTHREAD_FLAGS_IN_SYNCQ \ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ) /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. 
*/ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Global variables for the uthread kernel. */ SCLASS void *_usrstack #ifdef GLOBAL_PTHREAD_PRIVATE = (void *) USRSTACK; #else ; #endif /* Kernel thread structure used when there are no running threads: */ SCLASS struct pthread _thread_kern_thread; /* Ptr to the thread structure for the running thread: */ SCLASS struct pthread * volatile _thread_run #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif /* Ptr to the thread structure for the last user thread to run: */ SCLASS struct pthread * volatile _last_user_thread #ifdef GLOBAL_PTHREAD_PRIVATE = &_thread_kern_thread; #else ; #endif -/* - * Ptr to the thread running in single-threaded mode or NULL if - * running multi-threaded (default POSIX behaviour). - */ -SCLASS struct pthread * volatile _thread_single -#ifdef GLOBAL_PTHREAD_PRIVATE -= NULL; -#else -; -#endif - /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_thread_list); #else ; #endif /* * Array of kernel pipe file descriptors that are used to ensure that * no signals are missed in calls to _select. 
*/ SCLASS int _thread_kern_pipe[2] #ifdef GLOBAL_PTHREAD_PRIVATE = { -1, -1 }; #else ; #endif SCLASS int volatile _queue_signals #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _thread_kern_in_sched #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _sig_in_handler #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Time of day at last scheduling timer signal: */ SCLASS struct timeval volatile _sched_tod #ifdef GLOBAL_PTHREAD_PRIVATE = { 0, 0 }; #else ; #endif /* * Current scheduling timer ticks; used as resource usage. */ SCLASS unsigned int volatile _sched_ticks #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif /* Dead threads: */ SCLASS TAILQ_HEAD(, pthread) _dead_list #ifdef GLOBAL_PTHREAD_PRIVATE = TAILQ_HEAD_INITIALIZER(_dead_list); #else ; #endif /* Initial thread: */ SCLASS struct pthread *_thread_initial #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Default thread attributes: */ SCLASS struct pthread_attr pthread_attr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT, -1 }; #else ; #endif /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr pthread_mutexattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }; #else ; #endif /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr pthread_condattr_default #ifdef GLOBAL_PTHREAD_PRIVATE = { COND_TYPE_FAST, 0 }; #else ; #endif /* * Standard I/O file descriptors need special flag treatment since * setting one to non-blocking does all on *BSD. Sigh. This array * is used to store the initial flag settings. 
*/ SCLASS int _pthread_stdio_flags[3]; /* File table information: */ SCLASS struct fd_table_entry **_thread_fd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif /* Table for polling file descriptors: */ SCLASS struct pollfd *_thread_pfd_table #ifdef GLOBAL_PTHREAD_PRIVATE = NULL; #else ; #endif SCLASS const int dtablecount #ifdef GLOBAL_PTHREAD_PRIVATE = 4096/sizeof(struct fd_table_entry); #else ; #endif SCLASS int _thread_dtablesize /* Descriptor table size. */ #ifdef GLOBAL_PTHREAD_PRIVATE = 0; #else ; #endif SCLASS int _clock_res_usec /* Clock resolution in usec. */ #ifdef GLOBAL_PTHREAD_PRIVATE = CLOCK_RES_USEC; #else ; #endif /* Garbage collector mutex and condition variable. */ SCLASS pthread_mutex_t _gc_mutex #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; SCLASS pthread_cond_t _gc_cond #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Array of signal actions for this process. */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there is a * thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Pending signals and mask for this process: */ SCLASS sigset_t _process_sigpending; SCLASS sigset_t _process_sigmask #ifdef GLOBAL_PTHREAD_PRIVATE = { {0, 0, 0, 0} } #endif ; /* * Scheduling queues: */ SCLASS pq_queue_t _readyq; SCLASS TAILQ_HEAD(, pthread) _waitingq; /* * Work queue: */ SCLASS TAILQ_HEAD(, pthread) _workq; /* Tracks the number of threads blocked while waiting for a spinlock. */ SCLASS volatile int _spinblock_count #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Used to maintain pending and active signals: */ struct sigstatus { int pending; /* Is this a pending signal? */ int blocked; /* * A handler is currently active for * this signal; ignore subsequent * signals until the handler is done. 
*/ int signo; /* arg 1 to signal handler */ siginfo_t siginfo; /* arg 2 to signal handler */ ucontext_t uc; /* arg 3 to signal handler */ }; SCLASS struct sigstatus _thread_sigq[NSIG]; /* Indicates that the signal queue needs to be checked. */ SCLASS volatile int _sigq_check_reqd #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Thread switch hook. */ SCLASS pthread_switch_routine_t _sched_switch_hook #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* * Declare the kernel scheduler jump buffer and stack: */ SCLASS jmp_buf _thread_kern_sched_jb; SCLASS void * _thread_kern_sched_stack #ifdef GLOBAL_PTHREAD_PRIVATE = NULL #endif ; /* Used for _PTHREADS_INVARIANTS checking. */ SCLASS int _thread_kern_new_state #ifdef GLOBAL_PTHREAD_PRIVATE = 0 #endif ; /* Undefine the storage class specifier: */ #undef SCLASS #ifdef _LOCK_DEBUG #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \ _ts, __FILE__, __LINE__) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \ __FILE__, __LINE__) #else #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts) #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type) #endif /* * Function prototype definitions. 
*/ __BEGIN_DECLS char *__ttyname_basic(int); char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); void _cond_wait_backout(pthread_t); void _fd_lock_backout(pthread_t); int _find_thread(pthread_t); struct pthread *_get_curthread(void); void _set_curthread(struct pthread *); void *_thread_stack_alloc(size_t, size_t); void _thread_stack_free(void *, size_t, size_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(pthread_t); void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); pthread_t _pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); void _waitq_insert(pthread_t pthread); void _waitq_remove(pthread_t pthread); #if 
defined(_PTHREADS_INVARIANTS) void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_init(void); void _thread_kern_sched(ucontext_t *); void _thread_kern_scheduler(void); void _thread_kern_sched_frame(struct pthread_signal_frame *psf); void _thread_kern_sched_sig(void); void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(const struct timespec *); void _thread_kern_sig_defer(void); void _thread_kern_sig_undefer(void); void _thread_sig_handler(int, siginfo_t *, ucontext_t *); void _thread_sig_check_pending(struct pthread *pthread); void _thread_sig_handle_pending(void); void _thread_sig_send(struct pthread *pthread, int sig); void _thread_sig_wrapper(void); void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf); void _thread_start(void); void _thread_seterrno(pthread_t, int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); void _thread_enter_cancellation_point(void); void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); /* #include */ #ifdef _SYS_ACL_H int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *); int __sys___acl_delete_fd(int, acl_type_t); int __sys___acl_get_fd(int, acl_type_t, struct acl *); int __sys___acl_set_fd(int, acl_type_t, struct acl *); #endif /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_CAPABILITY_H int __sys___cap_get_fd(int, struct cap *); int 
__sys___cap_set_fd(int, struct cap *); #endif /* #include */ #ifdef _SYS_EVENT_H_ int __sys_kevent(int, const struct kevent *, int, struct kevent *, int, const struct timespec *); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif /* #include */ #ifdef _SYS_MOUNT_H_ int __sys_fstatfs(int, struct statfs *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_accept(int, struct sockaddr *, socklen_t *); int __sys_bind(int, const struct sockaddr *, socklen_t); int __sys_connect(int, const struct sockaddr *, socklen_t); int __sys_getpeername(int, struct sockaddr *, socklen_t *); int __sys_getsockname(int, struct sockaddr *, socklen_t *); int __sys_getsockopt(int, int, int, void *, socklen_t *); int __sys_listen(int, int); ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *); ssize_t __sys_recvmsg(int, struct msghdr *, int); int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); ssize_t __sys_sendmsg(int, const struct msghdr *, int); ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t); int __sys_setsockopt(int, int, int, const void *, socklen_t); int __sys_shutdown(int, int); int __sys_socket(int, int, int); int __sys_socketpair(int, int, int, int *); #endif /* #include */ #ifdef _SYS_STAT_H_ int __sys_fchflags(int, u_long); int __sys_fchmod(int, mode_t); int __sys_fstat(int, struct stat *); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef WNOHANG pid_t __sys_wait4(pid_t, int *, int, struct rusage *); #endif /* #include */ #ifdef _DIRENT_H_ int __sys_getdirentries(int, char *, int, long *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_flock(int, int); int __sys_open(const char *, int, ...); #endif /* 
#include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigreturn(ucontext_t *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_dup(int); int __sys_dup2(int, int); int __sys_execve(const char *, char * const *, char * const *); void __sys_exit(int); int __sys_fchown(int, uid_t, gid_t); pid_t __sys_fork(void); long __sys_fpathconf(int, int); int __sys_fsync(int); int __sys_pipe(int *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); #endif /* #include */ #ifdef _SETJMP_H_ extern void __siglongjmp(sigjmp_buf, int) __dead2; extern void __longjmp(jmp_buf, int) __dead2; extern void ___longjmp(jmp_buf, int) __dead2; #endif __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ diff --git a/lib/libpthread/thread/thr_resume_np.c b/lib/libpthread/thread/thr_resume_np.c index 9cbcf8563790..ed20b6a8d2f5 100644 --- a/lib/libpthread/thread/thr_resume_np.c +++ b/lib/libpthread/thread/thr_resume_np.c @@ -1,96 +1,111 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" +static void resume_common(struct pthread *); + __weak_reference(_pthread_resume_np, pthread_resume_np); +__weak_reference(_pthread_resume_all_np, pthread_resume_all_np); /* Resume a thread: */ int _pthread_resume_np(pthread_t thread) { - int ret; - enum pthread_susp old_suspended; + int ret; /* Find the thread in the list of active threads: */ if ((ret = _find_thread(thread)) == 0) { - /* Cancel any pending suspensions: */ - old_suspended = thread->suspended; - thread->suspended = SUSP_NO; + /* + * Defer signals to protect the scheduling queues + * from access by the signal handler: + */ + _thread_kern_sig_defer(); + + if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + resume_common(thread); + + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); + } + return (ret); +} + +void +_pthread_resume_all_np(void) +{ + struct pthread *curthread = _get_curthread(); + struct pthread *thread; + + /* + * Defer signals to protect 
the scheduling queues from access + * by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if ((thread != curthread) && + ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)) + resume_common(thread); + } - /* Is it currently suspended? */ - if (thread->state == PS_SUSPENDED) { - /* - * Defer signals to protect the scheduling queues - * from access by the signal handler: - */ - _thread_kern_sig_defer(); + /* + * Undefer and handle pending signals, yielding if necessary: + */ + _thread_kern_sig_undefer(); +} - switch (old_suspended) { - case SUSP_MUTEX_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_MUTEX_WAIT); - break; - case SUSP_COND_WAIT: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_COND_WAIT); - break; - case SUSP_JOIN: - /* Set the thread's state back. */ - PTHREAD_SET_STATE(thread,PS_JOIN); - break; - case SUSP_NOWAIT: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - case SUSP_NO: - case SUSP_YES: - /* Allow the thread to run. */ - PTHREAD_SET_STATE(thread,PS_RUNNING); - PTHREAD_PRIOQ_INSERT_TAIL(thread); - break; - } +static void +resume_common(struct pthread *thread) +{ + /* Clear the suspend flag: */ + thread->flags &= ~PTHREAD_FLAGS_SUSPENDED; - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - _thread_kern_sig_undefer(); - } + /* + * If the thread's state is suspended, that means it is + * now runnable but not in any scheduling queue. Set the + * state to running and insert it into the run queue. 
+ */ + if (thread->state == PS_SUSPENDED) { + PTHREAD_SET_STATE(thread, PS_RUNNING); + if (thread->priority_mutex_count > 0) + PTHREAD_PRIOQ_INSERT_HEAD(thread); + else + PTHREAD_PRIOQ_INSERT_TAIL(thread); } - return(ret); } diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c index 1bd93b7d67cb..7aa9b53967b3 100644 --- a/lib/libpthread/thread/thr_sig.c +++ b/lib/libpthread/thread/thr_sig.c @@ -1,1117 +1,1125 @@ /* * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "pthread_private.h" /* Prototypes: */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args); static void thread_sig_check_state(struct pthread *pthread, int sig); static struct pthread *thread_sig_find(int sig); static void thread_sig_handle_special(int sig); static void thread_sigframe_add(struct pthread *thread, int sig, int has_args); static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf); static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp); -/* #define DEBUG_SIGNAL */ +/*#define DEBUG_SIGNAL*/ #ifdef DEBUG_SIGNAL #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) 
#endif #if defined(_PTHREADS_INVARIANTS) #define SIG_SET_ACTIVE() _sig_in_handler = 1 #define SIG_SET_INACTIVE() _sig_in_handler = 0 #else #define SIG_SET_ACTIVE() #define SIG_SET_INACTIVE() #endif void _thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); struct pthread *pthread, *pthread_h; int in_sched = _thread_kern_in_sched; char c; if (ucp == NULL) PANIC("Thread signal handler received null context"); DBG_MSG("Got signal %d, current thread %p\n", sig, curthread); /* Check if an interval timer signal: */ if (sig == _SCHED_SIGNAL) { /* Update the scheduling clock: */ gettimeofday((struct timeval *)&_sched_tod, NULL); _sched_ticks++; if (in_sched != 0) { /* * The scheduler is already running; ignore this * signal. */ } /* * Check if the scheduler interrupt has come when * the currently running thread has deferred thread * signals. */ else if (curthread->sig_defer_count > 0) curthread->yield_on_sig_undefer = 1; else { /* Schedule the next thread: */ _thread_kern_sched(ucp); /* * This point should not be reached, so abort the * process: */ PANIC("Returned to signal function from scheduler"); } } /* * Check if the kernel has been interrupted while the scheduler * is accessing the scheduling queues or if there is a currently * running thread that has deferred signals. */ else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) { /* Cast the signal number to a character variable: */ c = sig; /* * Write the signal number to the kernel pipe so that it will * be ready to read when this signal handler returns. */ if (_queue_signals != 0) { __sys_write(_thread_kern_pipe[1], &c, 1); DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig); } if (_thread_sigq[sig - 1].blocked == 0) { DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig); /* * Do not block this signal; it will be blocked * when the pending signals are run down. 
*/ /* _thread_sigq[sig - 1].blocked = 1; */ /* * Queue the signal, saving siginfo and sigcontext * (ucontext). * * XXX - Do we need to copy siginfo and ucp? */ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); /* Indicate that there are queued signals: */ _thread_sigq[sig - 1].pending = 1; _sigq_check_reqd = 1; } /* These signals need special handling: */ else if (sig == SIGCHLD || sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) { _thread_sigq[sig - 1].pending = 1; _thread_sigq[sig - 1].signo = sig; _sigq_check_reqd = 1; } else DBG_MSG("Got signal %d, ignored.\n", sig); } /* * The signal handlers should have been installed so that they * cannot be interrupted by other signals. */ else if (_thread_sigq[sig - 1].blocked == 0) { /* * The signal is not blocked; handle the signal. * * Ignore subsequent occurrences of this signal * until the current signal is handled: */ _thread_sigq[sig - 1].blocked = 1; /* This signal will be handled; clear the pending flag: */ _thread_sigq[sig - 1].pending = 0; /* * Save siginfo and sigcontext (ucontext). * * XXX - Do we need to copy siginfo and ucp? 
*/ _thread_sigq[sig - 1].signo = sig; if (info != NULL) memcpy(&_thread_sigq[sig - 1].siginfo, info, sizeof(*info)); memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp)); SIG_SET_ACTIVE(); /* Handle special signals: */ thread_sig_handle_special(sig); pthread_h = NULL; if ((pthread = thread_sig_find(sig)) == NULL) DBG_MSG("No thread to handle signal %d\n", sig); else if (pthread == curthread) { /* * Unblock the signal and restore the process signal * mask in case we don't return from the handler: */ _thread_sigq[sig - 1].blocked = 0; __sys_sigprocmask(SIG_SETMASK, &_process_sigmask, NULL); /* Call the signal handler for the current thread: */ thread_sig_invoke_handler(sig, info, ucp); /* * Set the process signal mask in the context; it * could have changed by the handler. */ ucp->uc_sigmask = _process_sigmask; /* Resume the interrupted thread: */ __sys_sigreturn(ucp); } else { DBG_MSG("Got signal %d, adding frame to thread %p\n", sig, pthread); /* Setup the target thread to receive the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); /* Take a peek at the next ready to run thread: */ pthread_h = PTHREAD_PRIOQ_FIRST(); DBG_MSG("Finished adding frame, head of prio list %p\n", pthread_h); } SIG_SET_INACTIVE(); /* * Switch to a different context if the currently running * thread takes a signal, or if another thread takes a * signal and the currently running thread is not in a * signal handler. 
*/ if ((pthread_h != NULL) && (pthread_h->active_priority > curthread->active_priority)) { /* Enter the kernel scheduler: */ _thread_kern_sched(ucp); } } else { SIG_SET_ACTIVE(); thread_sig_handle_special(sig); SIG_SET_INACTIVE(); } } static void thread_sig_invoke_handler(int sig, siginfo_t *info, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); void (*sigfunc)(int, siginfo_t *, void *); int saved_seqno; sigset_t saved_sigmask; /* Invoke the signal handler without going through the scheduler: */ DBG_MSG("Got signal %d, calling handler for current thread %p\n", sig, curthread); /* Save the threads signal mask: */ saved_sigmask = curthread->sigmask; saved_seqno = curthread->sigmask_seqno; /* Setup the threads signal mask: */ SIGSETOR(curthread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&curthread->sigmask, sig); /* * Check that a custom handler is installed and if * the signal is not blocked: */ sigfunc = _thread_sigact[sig - 1].sa_sigaction; if (((__sighandler_t *)sigfunc != SIG_DFL) && ((__sighandler_t *)sigfunc != SIG_IGN)) { if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) || (info == NULL)) (*(sigfunc))(sig, info, ucp); else (*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp); } /* * Only restore the signal mask if it hasn't been changed by the * application during invocation of the signal handler: */ if (curthread->sigmask_seqno == saved_seqno) curthread->sigmask = saved_sigmask; } /* * Find a thread that can handle the signal. 
*/ struct pthread * thread_sig_find(int sig) { struct pthread *curthread = _get_curthread(); int handler_installed; struct pthread *pthread, *pthread_next; struct pthread *suspended_thread, *signaled_thread; DBG_MSG("Looking for thread to handle signal %d\n", sig); /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); /* Unblock this signal to allow further dumps: */ _thread_sigq[sig - 1].blocked = 0; } /* Check if an interval timer signal: */ else if (sig == _SCHED_SIGNAL) { /* * This shouldn't ever occur (should this panic?). */ } else { /* * Enter a loop to look for threads that have the signal * unmasked. POSIX specifies that a thread in a sigwait * will get the signal over any other threads. Second * preference will be threads in in a sigsuspend. Third * preference will be the current thread. If none of the * above, then the signal is delivered to the first thread * that is found. Note that if a custom handler is not * installed, the signal only affects threads in sigwait. */ suspended_thread = NULL; if ((curthread != &_thread_kern_thread) && !sigismember(&curthread->sigmask, sig)) signaled_thread = curthread; else signaled_thread = NULL; if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) || (_thread_sigact[sig - 1].sa_handler == SIG_DFL)) handler_installed = 0; else handler_installed = 1; for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly destroying * the link entry. */ pthread_next = TAILQ_NEXT(pthread, pqe); if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* * A signal handler is not invoked for threads * in sigwait. Clear the blocked and pending * flags. 
*/ _thread_sigq[sig - 1].blocked = 0; _thread_sigq[sig - 1].pending = 0; /* Return the signal number: */ pthread->signo = sig; /* * POSIX doesn't doesn't specify which thread * will get the signal if there are multiple * waiters, so we give it to the first thread * we find. * * Do not attempt to deliver this signal * to other threads and do not add the signal * to the process pending set. */ return (NULL); } else if ((handler_installed != 0) && - !sigismember(&pthread->sigmask, sig)) { + !sigismember(&pthread->sigmask, sig) && + ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) == 0)) { if (pthread->state == PS_SIGSUSPEND) { if (suspended_thread == NULL) suspended_thread = pthread; } else if (signaled_thread == NULL) signaled_thread = pthread; } } /* * Only perform wakeups and signal delivery if there is a * custom handler installed: */ if (handler_installed == 0) { /* * There is no handler installed. Unblock the * signal so that if a handler _is_ installed, any * subsequent signals can be handled. */ _thread_sigq[sig - 1].blocked = 0; } else { /* * If we didn't find a thread in the waiting queue, * check the all threads queue: */ if (suspended_thread == NULL && signaled_thread == NULL) { /* * Enter a loop to look for other threads * capable of receiving the signal: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { if (!sigismember(&pthread->sigmask, sig)) { signaled_thread = pthread; break; } } } if (suspended_thread == NULL && signaled_thread == NULL) /* * Add it to the set of signals pending * on the process: */ sigaddset(&_process_sigpending, sig); else { /* * We only deliver the signal to one thread; * give preference to the suspended thread: */ if (suspended_thread != NULL) pthread = suspended_thread; else pthread = signaled_thread; return (pthread); } } } /* Returns nothing. 
*/ return (NULL); } void _thread_sig_check_pending(struct pthread *pthread) { sigset_t sigset; int i; /* * Check if there are pending signals for the running * thread or process that aren't blocked: */ sigset = pthread->sigpend; SIGSETOR(sigset, _process_sigpending); SIGSETNAND(sigset, pthread->sigmask); if (SIGNOTEMPTY(sigset)) { for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { if (sigismember(&pthread->sigpend, i) != 0) thread_sig_add(pthread, i, /*has_args*/ 0); else { thread_sig_add(pthread, i, /*has_args*/ 1); sigdelset(&_process_sigpending, i); } } } } } /* * This can only be called from the kernel scheduler. It assumes that * all thread contexts are saved and that a signal frame can safely be * added to any user thread. */ void _thread_sig_handle_pending(void) { struct pthread *pthread; int i, sig; PTHREAD_ASSERT(_thread_kern_in_sched != 0, "_thread_sig_handle_pending called from outside kernel schedule"); /* * Check the array of pending signals: */ for (i = 0; i < NSIG; i++) { if (_thread_sigq[i].pending != 0) { /* This signal is no longer pending. */ _thread_sigq[i].pending = 0; sig = _thread_sigq[i].signo; /* Some signals need special handling: */ thread_sig_handle_special(sig); if (_thread_sigq[i].blocked == 0) { /* * Block future signals until this one * is handled: */ _thread_sigq[i].blocked = 1; if ((pthread = thread_sig_find(sig)) != NULL) { /* * Setup the target thread to receive * the signal: */ thread_sig_add(pthread, sig, /*has_args*/ 1); } } } } } static void thread_sig_handle_special(int sig) { struct pthread *pthread, *pthread_next; int i; switch (sig) { case SIGCHLD: /* * Go through the file list and set all files * to non-blocking again in case the child * set some of them to block. Sigh. 
*/ for (i = 0; i < _thread_dtablesize; i++) { /* Check if this file is used: */ if (_thread_fd_table[i] != NULL) { /* * Set the file descriptor to non-blocking: */ __sys_fcntl(i, F_SETFL, _thread_fd_table[i]->flags | O_NONBLOCK); } } /* * Enter a loop to wake up all threads waiting * for a process to complete: */ for (pthread = TAILQ_FIRST(&_waitingq); pthread != NULL; pthread = pthread_next) { /* * Grab the next thread before possibly * destroying the link entry: */ pthread_next = TAILQ_NEXT(pthread, pqe); /* * If this thread is waiting for a child * process to complete, wake it up: */ if (pthread->state == PS_WAIT_WAIT) { /* Make the thread runnable: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } } break; /* * POSIX says that pending SIGCONT signals are * discarded when one of these signals occurs. */ case SIGTSTP: case SIGTTIN: case SIGTTOU: /* * Enter a loop to discard pending SIGCONT * signals: */ TAILQ_FOREACH(pthread, &_thread_list, tle) { sigdelset(&pthread->sigpend, SIGCONT); } break; default: break; } } /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed * for the signal, and if the target thread has the signal * unmasked. */ static void thread_sig_add(struct pthread *pthread, int sig, int has_args) { int restart; int suppress_handler = 0; int thread_is_active = 0; restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART; /* Make sure this signal isn't still in the pending set: */ sigdelset(&pthread->sigpend, sig); /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: /* * You can't call a signal handler for threads in these * states. 
*/ suppress_handler = 1; break; /* * States which do not need any cleanup handling when signals * occur: */ case PS_RUNNING: /* * Remove the thread from the queue before changing its * priority: */ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) PTHREAD_PRIOQ_REMOVE(pthread); else /* * This thread is running; avoid placing it in * the run queue: */ thread_is_active = 1; break; case PS_SUSPENDED: break; case PS_SPINBLOCK: /* Remove the thread from the workq and waitq: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGWAIT: /* The signal handler is not called for threads in SIGWAIT. */ suppress_handler = 1; /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. */ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* Change the state of the thread to run: */ PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else { /* * Mark the thread as interrupted only if the * restart flag is not set on the signal action: */ if (restart == 0) pthread->interrupted = 1; PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); } break; /* * States which cannot be interrupted but still require the * signal handler to run: */ case PS_COND_WAIT: case PS_MUTEX_WAIT: /* * Remove the thread from the wait queue. It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); break; case PS_JOIN: /* * Remove the thread from the wait queue. 
It will * be added back to the wait queue once all signal * handlers have been invoked. */ PTHREAD_WAITQ_REMOVE(pthread); /* Make the thread runnable: */ PTHREAD_SET_STATE(pthread, PS_RUNNING); break; /* * States which are interruptible but may need to be removed * from queues before any signal handler is called. * * XXX - We may not need to handle this condition, but will * mark it as a potential problem. */ case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: if (restart == 0) pthread->interrupted = 1; /* * Remove the thread from the wait queue. Our * signal handler hook will remove this thread * from the fd or file queue before invoking * the actual handler. */ PTHREAD_WAITQ_REMOVE(pthread); break; /* * States which are interruptible: */ case PS_FDR_WAIT: case PS_FDW_WAIT: if (restart == 0) { /* * Flag the operation as interrupted and * set the state to running: */ pthread->interrupted = 1; PTHREAD_SET_STATE(pthread, PS_RUNNING); } PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); break; case PS_POLL_WAIT: case PS_SELECT_WAIT: case PS_SLEEP_WAIT: /* * Unmasked signals always cause poll, select, and sleep * to terminate early, regardless of SA_RESTART: */ pthread->interrupted = 1; /* Remove threads in poll and select from the workq: */ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; case PS_SIGSUSPEND: PTHREAD_WAITQ_REMOVE(pthread); PTHREAD_SET_STATE(pthread, PS_RUNNING); break; } if (suppress_handler == 0) { /* Setup a signal frame and save the current threads state: */ thread_sigframe_add(pthread, sig, has_args); /* * Signals are deferred until just before the threads * signal handler is invoked: */ pthread->sig_defer_count = 1; /* Make sure the thread is runnable: */ if (pthread->state != PS_RUNNING) PTHREAD_SET_STATE(pthread, PS_RUNNING); /* * The thread should be removed from all scheduling * queues at this point. 
Raise the priority and place - * the thread in the run queue. + * the thread in the run queue. It is also possible + * for a signal to be sent to a suspended thread, + * mostly via pthread_kill(). If a thread is suspended, + * don't insert it into the priority queue; just set + * its state to suspended and it will run the signal + * handler when it is resumed. */ pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY; - if (thread_is_active == 0) + if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) + PTHREAD_SET_STATE(pthread, PS_SUSPENDED); + else if (thread_is_active == 0) PTHREAD_PRIOQ_INSERT_TAIL(pthread); } } static void thread_sig_check_state(struct pthread *pthread, int sig) { /* * Process according to thread state: */ switch (pthread->state) { /* * States which do not change when a signal is trapped: */ case PS_DEAD: case PS_DEADLOCK: case PS_STATE_MAX: case PS_SIGTHREAD: case PS_RUNNING: case PS_SUSPENDED: case PS_SPINBLOCK: case PS_COND_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: break; case PS_SIGWAIT: /* Wake up the thread if the signal is blocked. */ if (sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else /* Increment the pending signal count. */ sigaddset(&pthread->sigpend, sig); break; /* * The wait state is a special case due to the handling of * SIGCHLD signals. 
*/ case PS_WAIT_WAIT: if (sig == SIGCHLD) { /* * Remove the thread from the wait queue and * make it runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } break; case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_SIGSUSPEND: case PS_SLEEP_WAIT: /* * Remove the thread from the wait queue and make it * runnable: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; /* * These states are additionally in the work queue: */ case PS_FDR_WAIT: case PS_FDW_WAIT: case PS_FILE_WAIT: case PS_POLL_WAIT: case PS_SELECT_WAIT: /* * Remove the thread from the wait and work queues, and * make it runnable: */ PTHREAD_WORKQ_REMOVE(pthread); PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Flag the operation as interrupted: */ pthread->interrupted = 1; break; } } /* * Send a signal to a specific thread (ala pthread_kill): */ void _thread_sig_send(struct pthread *pthread, int sig) { struct pthread *curthread = _get_curthread(); /* Check for signals whose actions are SIG_DFL: */ if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) { /* * Check to see if a temporary signal handler is * installed for sigwaiters: */ if (_thread_dfl_count[sig] == 0) /* * Deliver the signal to the process if a handler * is not installed: */ kill(getpid(), sig); /* * Assuming we're still running after the above kill(), * make any necessary state changes to the thread: */ thread_sig_check_state(pthread, sig); } /* * Check that the signal is not being ignored: */ else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { if (pthread->state == PS_SIGWAIT && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ PTHREAD_NEW_STATE(pthread, PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; } else if (sigismember(&pthread->sigmask, sig)) /* Add the signal to the pending set: */ sigaddset(&pthread->sigpend, sig); else if (pthread == curthread) /* Call the signal 
handler for the current thread: */ thread_sig_invoke_handler(sig, NULL, NULL); else { /* Protect the scheduling queues: */ _thread_kern_sig_defer(); /* * Perform any state changes due to signal * arrival: */ thread_sig_add(pthread, sig, /* has args */ 0); /* Unprotect the scheduling queues: */ _thread_kern_sig_undefer(); } } } /* * User thread signal handler wrapper. * * thread - current running thread */ void _thread_sig_wrapper(void) { struct pthread_signal_frame *psf; struct pthread *thread = _get_curthread(); /* Get the current frame and state: */ psf = thread->curframe; thread->curframe = NULL; PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler"); /* * We're coming from the kernel scheduler; clear the in * scheduler flag: */ _thread_kern_in_sched = 0; /* Check the threads previous state: */ if (psf->saved_state.psd_state != PS_RUNNING) { /* * Do a little cleanup handling for those threads in * queues before calling the signal handler. Signals * for these threads are temporarily blocked until * after cleanup handling. 
*/ switch (psf->saved_state.psd_state) { case PS_FDLR_WAIT: case PS_FDLW_WAIT: _fd_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_COND_WAIT: _cond_wait_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; case PS_MUTEX_WAIT: _mutex_lock_backout(thread); psf->saved_state.psd_state = PS_RUNNING; break; default: break; } } /* Unblock the signal in case we don't return from the handler: */ _thread_sigq[psf->signo - 1].blocked = 0; /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY; /* * Reenable interruptions without checking for the need to * context switch: */ thread->sig_defer_count = 0; /* * Dispatch the signal via the custom signal handler: */ if (psf->sig_has_args == 0) thread_sig_invoke_handler(psf->signo, NULL, NULL); else thread_sig_invoke_handler(psf->signo, &psf->siginfo, &psf->uc); /* * Call the kernel scheduler to safely restore the frame and * schedule the next thread: */ _thread_kern_sched_frame(psf); } static void thread_sigframe_add(struct pthread *thread, int sig, int has_args) { struct pthread_signal_frame *psf = NULL; unsigned long stackp; /* Get the top of the threads stack: */ stackp = GET_STACK_JB(thread->ctx.jb); /* * Leave a little space on the stack and round down to the * nearest aligned word: */ stackp -= sizeof(double); stackp &= ~0x3UL; /* Allocate room on top of the stack for a new signal frame: */ stackp -= sizeof(struct pthread_signal_frame); psf = (struct pthread_signal_frame *) stackp; /* Save the current context in the signal frame: */ thread_sigframe_save(thread, psf); /* Set handler specific information: */ psf->sig_has_args = has_args; psf->signo = sig; if (has_args) { /* Copy the signal handler arguments to the signal frame: */ memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc, sizeof(psf->uc)); memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo, sizeof(psf->siginfo)); } /* Setup 
the signal mask: */ SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask); sigaddset(&thread->sigmask, sig); /* Set up the new frame: */ thread->curframe = psf; thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE | PTHREAD_FLAGS_IN_SYNCQ; /* * Set up the context: */ stackp -= sizeof(double); _setjmp(thread->ctx.jb); SET_STACK_JB(thread->ctx.jb, stackp); SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper); } void _thread_sigframe_restore(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&thread->ctx, &psf->ctx, sizeof(thread->ctx)); /* * Only restore the signal mask if it hasn't been changed * by the application during invocation of the signal handler: */ if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno) thread->sigmask = psf->saved_state.psd_sigmask; thread->curframe = psf->saved_state.psd_curframe; thread->wakeup_time = psf->saved_state.psd_wakeup_time; thread->data = psf->saved_state.psd_wait_data; thread->state = psf->saved_state.psd_state; thread->flags = psf->saved_state.psd_flags; thread->interrupted = psf->saved_state.psd_interrupted; thread->signo = psf->saved_state.psd_signo; thread->sig_defer_count = psf->saved_state.psd_sig_defer_count; } static void thread_sigframe_save(struct pthread *thread, struct pthread_signal_frame *psf) { memcpy(&psf->ctx, &thread->ctx, sizeof(thread->ctx)); psf->saved_state.psd_sigmask = thread->sigmask; psf->saved_state.psd_curframe = thread->curframe; psf->saved_state.psd_wakeup_time = thread->wakeup_time; psf->saved_state.psd_wait_data = thread->data; psf->saved_state.psd_state = thread->state; psf->saved_state.psd_flags = thread->flags & (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE); psf->saved_state.psd_interrupted = thread->interrupted; psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno; psf->saved_state.psd_signo = thread->signo; psf->saved_state.psd_sig_defer_count = thread->sig_defer_count; } diff --git a/lib/libpthread/thread/thr_single_np.c 
b/lib/libpthread/thread/thr_single_np.c index 85471b8cf5c1..1ee5e7918bd9 100644 --- a/lib/libpthread/thread/thr_single_np.c +++ b/lib/libpthread/thread/thr_single_np.c @@ -1,47 +1,49 @@ /* * Copyright (c) 1996 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ -#include #include -#include "pthread_private.h" +#include __weak_reference(_pthread_single_np, pthread_single_np); int _pthread_single_np() { - struct pthread *curthread = _get_curthread(); /* Enter single-threaded (non-POSIX) scheduling mode: */ - _thread_single = curthread; - return(0); + pthread_suspend_all_np(); + /* + * XXX - Do we want to do this? + * __is_threaded = 0; + */ + return (0); } diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c index 73337094d431..e05aa4a5fc0b 100644 --- a/lib/libpthread/thread/thr_spinlock.c +++ b/lib/libpthread/thread/thr_spinlock.c @@ -1,111 +1,111 @@ /* * Copyright (c) 1997 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include #include #include #include #include #include "pthread_private.h" /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. */ void _spinlock(spinlock_t *lck) { struct pthread *curthread = _get_curthread(); /* * Try to grab the lock and loop if another thread grabs * it before we do. */ while(_atomic_lock(&lck->access_lock)) { /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; } /* * Lock a location for the running thread. Yield to allow other * threads to run if this thread is blocked because the lock is * not available. Note that this function does not sleep. It * assumes that the lock will be available very soon. * * This function checks if the running thread has already locked the * location, warns if this occurs and creates a thread dump before * returning. */ void _spinlock_debug(spinlock_t *lck, char *fname, int lineno) { struct pthread *curthread = _get_curthread(); int cnt = 0; /* * Try to grab the lock and loop if another thread grabs * it before we do. 
*/ while(_atomic_lock(&lck->access_lock)) { cnt++; if (cnt > 100) { char str[256]; - snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", _getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); + snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno); __sys_write(2,str,strlen(str)); __sleep(1); cnt = 0; } /* Block the thread until the lock. */ curthread->data.spinlock = lck; _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno); } /* The running thread now owns the lock: */ lck->lock_owner = (long) curthread; lck->fname = fname; lck->lineno = lineno; } diff --git a/lib/libpthread/thread/thr_suspend_np.c b/lib/libpthread/thread/thr_suspend_np.c index 0e272ff11d55..952baa350ec5 100644 --- a/lib/libpthread/thread/thr_suspend_np.c +++ b/lib/libpthread/thread/thr_suspend_np.c @@ -1,161 +1,104 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "pthread_private.h" -static void finish_suspension(void *arg); +static void suspend_common(struct pthread *thread); __weak_reference(_pthread_suspend_np, pthread_suspend_np); +__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np); /* Suspend a thread: */ int _pthread_suspend_np(pthread_t thread) { int ret; + /* Suspending the current thread doesn't make sense. 
*/ + if (thread == _get_curthread()) + ret = EDEADLK; + /* Find the thread in the list of active threads: */ - if ((ret = _find_thread(thread)) == 0) { + else if ((ret = _find_thread(thread)) == 0) { /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); - switch (thread->state) { - case PS_RUNNING: - /* - * Remove the thread from the priority queue and - * set the state to suspended: - */ - PTHREAD_PRIOQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_SPINBLOCK: - case PS_FDR_WAIT: - case PS_FDW_WAIT: - case PS_POLL_WAIT: - case PS_SELECT_WAIT: - /* - * Remove these threads from the work queue - * and mark the operation as interrupted: - */ - if ((thread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0) - PTHREAD_WORKQ_REMOVE(thread); - _thread_seterrno(thread,EINTR); - - /* FALLTHROUGH */ - case PS_SLEEP_WAIT: - thread->interrupted = 1; - - /* FALLTHROUGH */ - case PS_SIGTHREAD: - case PS_WAIT_WAIT: - case PS_SIGSUSPEND: - case PS_SIGWAIT: - /* - * Remove these threads from the waiting queue and - * set their state to suspended: - */ - PTHREAD_WAITQ_REMOVE(thread); - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - - case PS_MUTEX_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_MUTEX_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_COND_WAIT: - /* Mark the thread as suspended and still in a queue. */ - thread->suspended = SUSP_COND_WAIT; - - PTHREAD_SET_STATE(thread, PS_SUSPENDED); - break; - case PS_JOIN: - /* Mark the thread as suspended and joining: */ - thread->suspended = SUSP_JOIN; - - PTHREAD_NEW_STATE(thread, PS_SUSPENDED); - break; - case PS_FDLR_WAIT: - case PS_FDLW_WAIT: - case PS_FILE_WAIT: - /* Mark the thread as suspended: */ - thread->suspended = SUSP_YES; - - /* - * Threads in these states may be in queues. 
- * In order to preserve queue integrity, the - * cancelled thread must remove itself from the - * queue. Mark the thread as interrupted and - * set the state to running. When the thread - * resumes, it will remove itself from the queue - * and call the suspension completion routine. - */ - thread->interrupted = 1; - _thread_seterrno(thread, EINTR); - PTHREAD_NEW_STATE(thread, PS_RUNNING); - thread->continuation = finish_suspension; - break; - - case PS_DEAD: - case PS_DEADLOCK: - case PS_STATE_MAX: - case PS_SUSPENDED: - /* Nothing needs to be done: */ - break; - } + suspend_common(thread); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); } - return(ret); + return (ret); } -static void -finish_suspension(void *arg) +void +_pthread_suspend_all_np(void) { struct pthread *curthread = _get_curthread(); + struct pthread *thread; - if (curthread->suspended != SUSP_NO) - _thread_kern_sched_state(PS_SUSPENDED, __FILE__, __LINE__); -} + /* + * Defer signals to protect the scheduling queues from + * access by the signal handler: + */ + _thread_kern_sig_defer(); + + TAILQ_FOREACH(thread, &_thread_list, tle) { + if (thread != curthread) + suspend_common(thread); + } + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); +} +void +suspend_common(struct pthread *thread) +{ + thread->flags |= PTHREAD_FLAGS_SUSPENDED; + if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) { + PTHREAD_PRIOQ_REMOVE(thread); + PTHREAD_SET_STATE(thread, PS_SUSPENDED); + } +}