Index: head/lib/libkse/Makefile
===================================================================
--- head/lib/libkse/Makefile (revision 113660)
+++ head/lib/libkse/Makefile (revision 113661)
@@ -1,32 +1,32 @@
# $FreeBSD$
#
# All library objects contain FreeBSD revision strings by default; they may be
# excluded as a space-saving measure. To produce a library that does
# not contain these strings, add -DSTRIP_FBSDID (see ) to CFLAGS
# below. Note, there are no IDs for syscall stubs whose sources are generated.
# To included legacy CSRG sccsid strings, add -DLIBC_SCCS and -DSYSLIBC_SCCS
# (for system call stubs) to CFLAGS below. -DSYSLIBC_SCCS affects just the
# system call stubs.
LIB=kse
SHLIB_MAJOR= 1
CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
	-I${.CURDIR}/../../include
CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
CFLAGS+=-I${.CURDIR}/sys
# Uncomment this if you want libpthread to contain debug information for
# thread locking.
#CFLAGS+=-D_LOCK_DEBUG -g
# enable extra internal consistancy checks
CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
-.include "${.CURDIR}/man/Makefile.inc"
+#.include "${.CURDIR}/man/Makefile.inc"
.include "${.CURDIR}/thread/Makefile.inc"
.include "${.CURDIR}/sys/Makefile.inc"
.include
Index: head/lib/libkse/arch/i386/i386/thr_getcontext.S
===================================================================
--- head/lib/libkse/arch/i386/i386/thr_getcontext.S (revision 113660)
+++ head/lib/libkse/arch/i386/i386/thr_getcontext.S (revision 113661)
@@ -1,157 +1,157 @@
-/*
+/*-
 * Copyright (c) 2001 Daniel Eischen .
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include 
__FBSDID("$FreeBSD$");
/*
 * Where do we define these?
*/ #define UC_MC_OFFSET 16 /* offset to mcontext from ucontext */ #define MC_LEN_OFFSET 80 /* offset to mc_len from mcontext */ #define MC_LEN 640 /* mc_len */ #define MC_FPFMT_OFFSET 84 #define MC_FPFMT_NODEV 0x10000 #define MC_FPFMT_387 0x10001 #define MC_FPFMT_XMM 0x10002 #define MC_OWNEDFP_OFFSET 88 #define MC_OWNEDFP_NONE 0x20000 #define MC_OWNEDFP_FPU 0x20001 #define MC_OWNEDFP_PCB 0x20002 #define MC_FPREGS_OFFSET 96 /* offset to FP regs from mcontext */ #define MC_FP_CW_OFFSET 96 /* offset to FP control word */ /* * int thr_setcontext(ucontext_t *ucp) * * Restores the context in ucp. * * Returns 0 if there are no errors; -1 otherwise */ .weak CNAME(_thr_setcontext) .set CNAME(_thr_setcontext),CNAME(__thr_setcontext) ENTRY(__thr_setcontext) movl 4(%esp), %eax /* get address of context and sigset */ cmpl $0, %eax /* check for null pointer */ jne 1f movl $-1, %eax jmp 7f 1: addl $UC_MC_OFFSET, %eax /* add offset to mcontext */ cmpl $MC_LEN, MC_LEN_OFFSET(%eax) /* is context valid? */ je 2f movl $-1, %eax /* bzzzt, invalid context */ jmp 7f /*2: movl 4(%edx), %gs*/ /* we don't touch %gs */ 2: movl 8(%edx), %fs movl 12(%edx), %es movl 16(%edx), %ds movl 76(%edx), %ss movl 20(%edx), %edi movl 24(%edx), %esi movl 28(%edx), %ebp movl %esp, %ecx /* save current stack in ecx */ movl 72(%edx), %esp /* switch to context defined stack */ movl 60(%edx), %eax /* put return address at top of stack */ pushl %eax movl 44(%edx), %eax /* get ecx from context, */ pushl %eax /* push on top of stack */ movl 48(%edx), %eax /* get eax from context, */ pushl %eax /* push on top of stack */ /* * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB) { * if (mc_fpformat == MC_FPFMT_387) * restore 387 FP register format * else if (mc_fpformat == MC_FPFMT_XMM) * restore XMM/SSE FP register format * } */ cmpl $MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%edx) je 3f cmpl $MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%edx) jne 5f 3: cmpl $MC_FPFMT_387, MC_FPFMT_OFFSET(%edx) jne 5f frstor MC_FPREGS_OFFSET(%edx) /* restore 387 FP regs */ jmp 5f 4: cmpl $MC_FPFMT_XMM, MC_FPFMT_OFFSET(%edx) jne 5f fxrstor MC_FPREGS_OFFSET(%edx) /* restore XMM FP regs */ jmp 6f 5: fninit fldcw MC_FP_CW_OFFSET(%edx) 6: pushl 68(%edx) /* restore flags register */ popf movl 36(%edx), %ebx /* restore ebx and edx */ movl 40(%edx), %edx popl %eax /* restore eax and ecx last */ popl %ecx 7: ret /* * int thr_getcontext(ucontext_t *ucp); * * Returns 0 if there are no errors; -1 otherwise */ .weak CNAME(_thr_getcontext) .set CNAME(_thr_getcontext),CNAME(__thr_getcontext) ENTRY(__thr_getcontext) movl 4(%esp), %eax /* get address of context */ cmpl $0, %eax /* check for null pointer */ jne 1f movl $-1, %eax jmp 2f movl 4(%esp), %eax /* get address of context and sigset */ 1: pushl %edx /* save value of edx */ movl 8(%esp), %edx /* get address of context */ addl $UC_MC_OFFSET, %edx /* add offset to mcontext */ /*movl %gs, 4(%edx)*/ /* we don't touch %gs */ movl %fs, 8(%edx) movl %es, 12(%edx) movl %ds, 16(%edx) movl %ss, 76(%edx) movl %edi, 20(%edx) movl %esi, 24(%edx) movl %ebp, 28(%edx) movl %ebx, 36(%edx) movl $0, 48(%edx) /* store successful return in eax */ popl %eax /* get saved value of edx */ movl %eax, 40(%edx) /* save edx */ movl %ecx, 44(%edx) movl (%esp), %eax /* get return address */ movl %eax, 60(%edx) /* save return address */ fnstcw MC_FP_CW_OFFSET(%edx) movl $MC_LEN, MC_LEN_OFFSET(%edx) movl $MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%edx) /* no FP */ movl $MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%edx) /* no FP */ pushfl popl %eax /* get eflags */ movl 
%eax, 68(%edx) /* store eflags */ movl %esp, %eax /* setcontext pushes the return */ addl $4, %eax /* address onto the top of the */ movl %eax, 72(%edx) /* stack; account for this */ movl 40(%edx), %edx /* restore edx -- is this needed? */ xorl %eax, %eax /* return 0 */ 2: ret Index: head/lib/libkse/arch/i386/include/atomic_ops.h =================================================================== --- head/lib/libkse/arch/i386/include/atomic_ops.h (revision 113660) +++ head/lib/libkse/arch/i386/include/atomic_ops.h (revision 113661) @@ -1,51 +1,51 @@ -/* +/*- * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ATOMIC_OPS_H_ #define _ATOMIC_OPS_H_ /* * Atomic swap: * Atomic (tmp = *dst, *dst = val), then *res = tmp * * void atomic_swap_long(long *dst, long val, long *res); */ static inline void atomic_swap_long(long *dst, long val, long *res) { __asm __volatile( "xchgl %2, %1; movl %2, %0" : "=m" (*res) : "m" (*dst), "r" (val) : "memory"); } #define atomic_swap_int(d, v, r) \ atomic_swap_long((long *)(d), (long)(v), (long *)(r)) #define atomic_swap_ptr atomic_swap_int #endif Index: head/lib/libkse/arch/i386/include/pthread_md.h =================================================================== --- head/lib/libkse/arch/i386/include/pthread_md.h (revision 113660) +++ head/lib/libkse/arch/i386/include/pthread_md.h (revision 113661) @@ -1,54 +1,51 @@ /*- * Copyright (c) 2002 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
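
[Editor's note: the atomic_swap_long() routine in atomic_ops.h above atomically performs (tmp = *dst; *dst = val; *res = tmp); _lock_acquire() and _lock_release() further down rely on it, through the atomic_swap_ptr() wrapper, to splice lock requests onto the queue. A minimal sketch of the semantics, assuming atomic_ops.h is on the include path; it is an illustration, not part of the commit.]

#include <assert.h>
#include "atomic_ops.h"

int
main(void)
{
	long word = 1;
	long prev;
	long *head = &word, *oldhead;

	/* prev = word; word = 2; performed as one atomic operation. */
	atomic_swap_long(&word, 2, &prev);
	assert(prev == 1 && word == 2);

	/* The pointer flavor does the same for pointer-sized values. */
	atomic_swap_ptr(&head, NULL, &oldhead);
	assert(oldhead == &word && head == NULL);
	return (0);
}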
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions for the thread kernel. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ -#include #include #include -extern int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); -extern int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); extern int _thr_setcontext(ucontext_t *); extern int _thr_getcontext(ucontext_t *); /* * These are needed to ensure an application doesn't attempt to jump * between stacks of different threads. They return the stack of * jmp_buf, sigjmp_buf, and ucontext respectively. */ #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define THR_GETCONTEXT(ucp) _thr_getcontext(ucp) #define THR_SETCONTEXT(ucp) _thr_setcontext(ucp) #endif Index: head/lib/libkse/sys/lock.c =================================================================== --- head/lib/libkse/sys/lock.c (revision 113660) +++ head/lib/libkse/sys/lock.c (revision 113661) @@ -1,268 +1,268 @@ /*- - * Copyright (c) 2001 Daniel Eischen . + * Copyright (c) 2001, 2003 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
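
[Editor's note: the GET_STACK_* macros in pthread_md.h above exist so the library can refuse jmp_buf/sigjmp_buf/ucontext targets that live on another thread's stack. A rough sketch of such a check, assuming the stackaddr_attr/stacksize_attr attribute fields used elsewhere in this commit; the helper name is hypothetical.]

#include "pthread_md.h"
#include "thr_private.h"

/*
 * Hypothetical helper: non-zero if the saved context's stack pointer
 * lies within the given thread's stack.  stackaddr_attr is treated as
 * the low end of the stack region, as in thr_create.c and thr_init.c.
 */
static int
thr_owns_uc_stack(struct pthread *thr, ucontext_t *ucp)
{
	unsigned long sp = GET_STACK_UC(ucp);
	unsigned long base = (unsigned long)thr->attr.stackaddr_attr;

	return (sp >= base && sp < base + thr->attr.stacksize_attr);
}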
* * $FreeBSD$ */ #include #include #include #include #include "atomic_ops.h" #include "lock.h" #define LCK_ASSERT assert #define MAX_SPINS 500 void _lock_destroy(struct lock *lck) { if ((lck != NULL) && (lck->l_head != NULL)) { free(lck->l_head); lck->l_head = NULL; lck->l_tail = NULL; } } int _lock_init(struct lock *lck, enum lock_type ltype, lock_handler_t *waitfunc, lock_handler_t *wakeupfunc) { if (lck == NULL) return (-1); else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL) return (-1); else { lck->l_type = ltype; lck->l_wait = waitfunc; lck->l_wakeup = wakeupfunc; lck->l_head->lr_locked = 0; lck->l_head->lr_watcher = NULL; lck->l_head->lr_owner = NULL; lck->l_head->lr_waiting = 0; lck->l_tail = lck->l_head; } return (0); } int _lockuser_init(struct lockuser *lu, void *priv) { if (lu == NULL) return (-1); else if ((lu->lu_myreq == NULL) && ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL)) return (-1); else { lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_watcher = NULL; lu->lu_myreq->lr_owner = lu; lu->lu_myreq->lr_waiting = 0; lu->lu_watchreq = NULL; lu->lu_priority = 0; lu->lu_private = priv; lu->lu_private2 = NULL; } return (0); } void _lockuser_destroy(struct lockuser *lu) { if ((lu != NULL) && (lu->lu_myreq != NULL)) free(lu->lu_myreq); } /* * Acquire a lock waiting (spin or sleep) for it to become available. */ void _lock_acquire(struct lock *lck, struct lockuser *lu, int prio) { int i; /** * XXX - We probably want to remove these checks to optimize * performance. It is also a bug if any one of the * checks fail, so it's probably better to just let it * SEGV and fix it. */ #if 0 if (lck == NULL || lu == NULL || lck->l_head == NULL) return; #endif if ((lck->l_type & LCK_PRIORITY) == 0) atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq); else { LCK_ASSERT(lu->lu_myreq->lr_locked == 1); LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL); LCK_ASSERT(lu->lu_myreq->lr_owner == lu); LCK_ASSERT(lu->lu_myreq->lr_waiting == 0); LCK_ASSERT(lu->lu_watchreq == NULL); lu->lu_priority = prio; /* * Atomically swap the head of the lock request with * this request. */ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq); } if (lu->lu_watchreq->lr_locked != 0) { atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu); if ((lck->l_wait == NULL) || ((lck->l_type & LCK_ADAPTIVE) == 0)) { while (lu->lu_watchreq->lr_locked == 0) ; /* spin, then yield? */ } else { /* * Spin for a bit before invoking the wait function. * * We should be a little smarter here. If we're * running on a single processor, then the lock * owner got preempted and spinning will accomplish * nothing but waste time. If we're running on * multiple processors, the owner could be running * on another CPU and we might acquire the lock if * we spin for a bit. * * The other thing to keep in mind is that threads * acquiring these locks are considered to be in * critical regions; they will not be preempted by * the _UTS_ until they release the lock. It is * therefore safe to assume that if a lock can't * be acquired, it is currently held by a thread * running in another KSE. */ for (i = 0; i < MAX_SPINS; i++) { if (lu->lu_watchreq->lr_locked == 0) return; } atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1); while (lu->lu_watchreq->lr_locked != 0) lck->l_wait(lck, lu); atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0); } } } /* * Release a lock. 
*/ void _lock_release(struct lock *lck, struct lockuser *lu) { struct lockuser *lu_tmp, *lu_h; struct lockreq *myreq; int prio_h; /** * XXX - We probably want to remove these checks to optimize * performance. It is also a bug if any one of the * checks fail, so it's probably better to just let it * SEGV and fix it. */ #if 0 if ((lck == NULL) || (lu == NULL)) return; #endif if ((lck->l_type & LCK_PRIORITY) != 0) { prio_h = 0; lu_h = NULL; /* Update tail if our request is last. */ if (lu->lu_watchreq->lr_owner == NULL) { atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq); atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL); } else { /* Remove ourselves from the list. */ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, lu->lu_watchreq->lr_owner); atomic_store_rel_ptr( &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq); } /* * The watch request now becomes our own because we've * traded away our previous request. Save our previous * request so that we can grant the lock. */ myreq = lu->lu_myreq; lu->lu_myreq = lu->lu_watchreq; lu->lu_watchreq = NULL; lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_owner = lu; lu->lu_myreq->lr_watcher = NULL; lu->lu_myreq->lr_waiting = 0; /* * Traverse the list of lock requests in reverse order * looking for the user with the highest priority. */ for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL; lu_tmp = lu_tmp->lu_myreq->lr_watcher) { if (lu_tmp->lu_priority > prio_h) { lu_h = lu_tmp; prio_h = lu_tmp->lu_priority; } } if (lu_h != NULL) { /* Give the lock to the highest priority user. */ atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0); if ((lu_h->lu_watchreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher); } else { /* Give the lock to the previous request. */ atomic_store_rel_long(&myreq->lr_locked, 0); if ((myreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, myreq->lr_watcher); } } else { /* * The watch request now becomes our own because we've * traded away our previous request. Save our previous * request so that we can grant the lock. */ myreq = lu->lu_myreq; lu->lu_myreq = lu->lu_watchreq; lu->lu_watchreq = NULL; lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_waiting = 0; /* Give the lock to the previous request. */ atomic_store_rel_long(&myreq->lr_locked, 0); if ((myreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, myreq->lr_watcher); } } Index: head/lib/libkse/sys/lock.h =================================================================== --- head/lib/libkse/sys/lock.h (revision 113660) +++ head/lib/libkse/sys/lock.h (revision 113661) @@ -1,89 +1,89 @@ /* - * Copyright (c) 2001 Daniel Eischen . + * Copyright (c) 2001, 2003 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
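
[Editor's note: taken together with the declarations in lock.h below, the locking interface is used roughly as follows. This is a minimal sketch assuming the default FIFO spin-lock type; priority/adaptive locks instead pass real wait/wakeup handlers and a priority, as thr_create.c and thr_init.c do with _thr_lock_wait/_thr_lock_wakeup.]

#include "lock.h"

static struct lock	lck;
static struct lockuser	lu;	/* each thread of control needs its own */

static void
lock_example(void)
{
	if (_lock_init(&lck, LCK_DEFAULT, NULL, NULL) != 0 ||
	    _lockuser_init(&lu, NULL) != 0)
		return;

	_lock_acquire(&lck, &lu, 0);	/* spins until the request is granted */
	/* ... critical section ... */
	_lock_release(&lck, &lu);

	_lockuser_destroy(&lu);
	_lock_destroy(&lck);
}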
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LOCK_H_ #define _LOCK_H_ struct lockreq; struct lockuser; struct lock; enum lock_type { LCK_DEFAULT = 0x0000, /* default is FIFO spin locks */ LCK_PRIORITY = 0x0001, LCK_ADAPTIVE = 0x0002 /* call user-supplied handlers */ }; typedef void lock_handler_t(struct lock *, struct lockuser *); struct lock { struct lockreq *l_head; struct lockreq *l_tail; /* only used for priority locks */ enum lock_type l_type; lock_handler_t *l_wait; /* only used for adaptive locks */ lock_handler_t *l_wakeup; /* only used for adaptive locks */ }; /* Try to make this >= CACHELINESIZE */ struct lockreq { volatile long lr_locked; /* lock granted = 0, busy otherwise */ struct lockuser *lr_watcher; /* only used for priority locks */ struct lockuser *lr_owner; /* only used for priority locks */ long lr_waiting; /* non-zero when wakeup needed */ }; struct lockuser { struct lockreq *lu_myreq; /* request to give up/trade */ struct lockreq *lu_watchreq; /* watch this request */ int lu_priority; /* only used for priority locks */ void *lu_private1; /* private{1,2} are initialized to */ void *lu_private2; /* NULL and can be used by caller */ #define lu_private lu_private1 }; #define _LCK_INITIALIZER(lck_req) { &lck_req, NULL, LCK_DEFAULT, \ NULL, NULL } #define _LCK_REQUEST_INITIALIZER { 0, NULL, NULL, 0 } #define _LCK_BUSY(lu) ((lu)->lu_watchreq->lr_locked != 0) #define _LCK_GRANTED(lu) ((lu)->lu_watchreq->lr_locked == 0) #define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p) #define _LCK_GET_PRIVATE(lu) (lu)->lu_private #define _LCK_SET_PRIVATE2(lu, p) (lu)->lu_private2 = (void *)(p) #define _LCK_GET_PRIVATE2(lu) (lu)->lu_private2 void _lock_destroy(struct lock *); int _lock_init(struct lock *, enum lock_type, lock_handler_t *, lock_handler_t *); int _lockuser_init(struct lockuser *lu, void *priv); void _lockuser_destroy(struct lockuser *lu); void _lock_acquire(struct lock *, struct lockuser *, int); void _lock_release(struct lock *, struct lockuser *); #endif Index: head/lib/libkse/thread/thr_create.c =================================================================== --- head/lib/libkse/thread/thr_create.c (revision 113660) +++ head/lib/libkse/thread/thr_create.c (revision 113661) @@ -1,311 +1,312 @@ /* + * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "thr_private.h" #include "libc_private.h" static u_int64_t next_uniqueid = 1; #define OFF(f) offsetof(struct pthread, f) int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); int _thread_ctx_offset = OFF(tmbx.tm_context); #undef OFF int _thread_PS_RUNNING_value = PS_RUNNING; int _thread_PS_DEAD_value = PS_DEAD; static int create_stack(struct pthread_attr *pattr); static void thread_start(struct pthread *curthread, void *(*start_routine) (void *), void *arg); __weak_reference(_pthread_create, pthread_create); /* * Some notes on new thread creation and first time initializion * to enable multi-threading. * * There are basically two things that need to be done. * * 1) The internal library variables must be initialized. * 2) Upcalls need to be enabled to allow multiple threads * to be run. * * The first may be done as a result of other pthread functions * being called. When _thr_initial is null, _libpthread_init is * called to initialize the internal variables; this also creates * or sets the initial thread. It'd be nice to automatically * have _libpthread_init called on program execution so we don't * have to have checks throughout the library. * * The second part is only triggered by the creation of the first * thread (other than the initial/main thread). If the thread * being created is a scope system thread, then a new KSE/KSEG * pair needs to be allocated. Also, if upcalls haven't been * enabled on the initial thread's KSE, they must be now that * there is more than one thread; this could be delayed until * the initial KSEG has more than one thread. 
*/ int _pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { struct kse *curkse; struct pthread *curthread, *new_thread; struct kse *kse = NULL; struct kse_group *kseg = NULL; kse_critical_t crit; int i; int ret = 0; if (_thr_initial == NULL) _libpthread_init(NULL); crit = _kse_critical_enter(); curthread = _get_curthread(); curkse = curthread->kse; /* Allocate memory for the thread structure: */ - if ((new_thread = _thr_alloc(curkse)) == NULL) { + if ((new_thread = _thr_alloc(curthread)) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { /* Initialize the thread structure: */ memset(new_thread, 0, sizeof(struct pthread)); /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) /* Use the default thread attributes: */ new_thread->attr = _pthread_attr_default; else new_thread->attr = *(*attr); if (create_stack(&new_thread->attr) != 0) { /* Insufficient memory to create a stack: */ ret = EAGAIN; - _thr_free(curkse, new_thread); + _thr_free(curthread, new_thread); } else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) && - (((kse = _kse_alloc(curkse)) == NULL) - || ((kseg = _kseg_alloc(curkse)) == NULL))) { + (((kse = _kse_alloc(curthread)) == NULL) + || ((kseg = _kseg_alloc(curthread)) == NULL))) { /* Insufficient memory to create a new KSE/KSEG: */ ret = EAGAIN; if (kse != NULL) - _kse_free(curkse, kse); + _kse_free(curthread, kse); if ((new_thread->attr.flags & THR_STACK_USER) == 0) { KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); _thr_stack_free(&new_thread->attr); KSE_LOCK_RELEASE(curkse, &_thread_list_lock); } - _thr_free(curkse, new_thread); + _thr_free(curthread, new_thread); } else { if (kseg != NULL) { /* Add the KSE to the KSEG's list of KSEs. 
*/ TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe); kse->k_kseg = kseg; kse->k_schedq = &kseg->kg_schedq; } /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = THR_MAGIC; new_thread->slice_usec = -1; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Initialize the thread for signals: */ new_thread->sigmask = curthread->sigmask; /* No thread is wanting to join to this one: */ new_thread->joiner = NULL; /* Initialize the signal frame: */ new_thread->curframe = NULL; /* Initialize the machine context: */ THR_GETCONTEXT(&new_thread->tmbx.tm_context); new_thread->tmbx.tm_udata = new_thread; new_thread->tmbx.tm_context.uc_sigmask = new_thread->sigmask; new_thread->tmbx.tm_context.uc_stack.ss_size = new_thread->attr.stacksize_attr; new_thread->tmbx.tm_context.uc_stack.ss_sp = new_thread->attr.stackaddr_attr; makecontext(&new_thread->tmbx.tm_context, (void (*)(void))thread_start, 4, new_thread, start_routine, arg); /* * Check if this thread is to inherit the scheduling * attributes from its parent: */ if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) { /* Copy the scheduling attributes: */ new_thread->base_priority = curthread->base_priority & ~THR_SIGNAL_PRIORITY; new_thread->attr.prio = curthread->base_priority & ~THR_SIGNAL_PRIORITY; new_thread->attr.sched_policy = curthread->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their * default values: */ new_thread->base_priority = new_thread->attr.prio; } new_thread->active_priority = new_thread->base_priority; new_thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); /* Initialize thread locking. */ if (_lock_init(&new_thread->lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread lock"); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_init(&new_thread->lockusers[i], (void *)new_thread); _LCK_SET_PRIVATE2(&new_thread->lockusers[i], (void *)new_thread); } /* Initialise hooks in the thread structure: */ new_thread->specific = NULL; new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->continuation = NULL; if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) new_thread->state = PS_SUSPENDED; else new_thread->state = PS_RUNNING; /* * System scope threads have their own kse and * kseg. Process scope threads are all hung * off the main process kseg. */ if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) { new_thread->kseg = _kse_initial->k_kseg; new_thread->kse = _kse_initial; } else { kse->k_curthread = NULL; kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD; new_thread->kse = kse; new_thread->kseg = kse->k_kseg; kse->k_mbx.km_udata = kse; kse->k_mbx.km_curthread = NULL; } KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); /* * Initialise the unique id which GDB uses to * track threads. */ new_thread->uniqueid = next_uniqueid++; /* Add the thread to the linked list of all threads: */ THR_LIST_ADD(new_thread); KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); /* * Schedule the new thread starting a new KSEG/KSE * pair if necessary. 
*/ _thr_schedule_add(curthread, new_thread); /* Return a pointer to the thread structure: */ (*thread) = new_thread; } } _kse_critical_leave(crit); if ((ret == 0) && (_kse_isthreaded() == 0)) _kse_setthreaded(1); /* Return the status: */ return (ret); } static int create_stack(struct pthread_attr *pattr) { int ret; /* Check if a stack was specified in the thread attributes: */ if ((pattr->stackaddr_attr) != NULL) { pattr->guardsize_attr = 0; pattr->flags = THR_STACK_USER; ret = 0; } else ret = _thr_stack_alloc(pattr); return (ret); } static void thread_start(struct pthread *curthread, void *(*start_routine) (void *), void *arg) { /* Run the current thread's start routine with argument: */ pthread_exit(start_routine(arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } Index: head/lib/libkse/thread/thr_detach.c =================================================================== --- head/lib/libkse/thread/thr_detach.c (revision 113660) +++ head/lib/libkse/thread/thr_detach.c (revision 113661) @@ -1,102 +1,126 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
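
[Editor's note: as the thread-creation notes above describe, creating the first extra thread enables upcalls, and a PTHREAD_SCOPE_SYSTEM thread gets its own KSE/KSEG pair. From the application's point of view this is just the standard POSIX attribute interface; a small, self-contained example follows.]

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	(void)arg;
	printf("system-scope thread running\n");
	return (NULL);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t td;

	pthread_attr_init(&attr);
	/* A system-scope thread gets its own KSE/KSEG in this library. */
	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
	if (pthread_create(&td, &attr, worker, NULL) != 0)
		return (1);
	pthread_join(td, NULL);
	pthread_attr_destroy(&attr);
	return (0);
}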
* * $FreeBSD$ */ #include #include #include #include #include "thr_private.h" __weak_reference(_pthread_detach, pthread_detach); int _pthread_detach(pthread_t pthread) { - struct pthread *curthread, *joiner; + struct pthread *curthread = _get_curthread(); + struct pthread *joiner; + kse_critical_t crit; + int dead; int rval = 0; /* Check for invalid calling parameters: */ if (pthread == NULL || pthread->magic != THR_MAGIC) /* Return an invalid argument error: */ rval = EINVAL; + else if ((rval = _thr_ref_add(curthread, pthread, + /*include dead*/1)) != 0) { + /* Return an error: */ + _thr_leave_cancellation_point(curthread); + } + /* Check if the thread is already detached: */ - else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) + else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { /* Return an error: */ + _thr_ref_delete(curthread, pthread); rval = EINVAL; - else { + } else { /* Lock the detached thread: */ - curthread = _get_curthread(); THR_SCHED_LOCK(curthread, pthread); /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; /* Retrieve any joining thread and remove it: */ joiner = pthread->joiner; pthread->joiner = NULL; + if (joiner->kseg == pthread->kseg) { + /* + * We already own the scheduler lock for the joiner. + * Take advantage of that and make the joiner runnable. + */ + if (joiner->join_status.thread == pthread) { + /* + * Set the return value for the woken thread: + */ + joiner->join_status.error = ESRCH; + joiner->join_status.ret = NULL; + joiner->join_status.thread = NULL; - /* We are already in a critical region. */ - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) { - THR_LIST_REMOVE(pthread); - THR_GCLIST_ADD(pthread); - atomic_store_rel_int(&_gc_check, 1); - if (KSE_WAITING(_kse_initial)) - KSE_WAKEUP(_kse_initial); + _thr_setrunnable_unlocked(joiner); + } + joiner = NULL; } - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); - + dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0; THR_SCHED_UNLOCK(curthread, pthread); + + if (dead != 0) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + THR_GCLIST_ADD(pthread); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); + } + _thr_ref_delete(curthread, pthread); /* See if there is a thread waiting in pthread_join(): */ if (joiner != NULL) { /* Lock the joiner before fiddling with it. */ THR_SCHED_LOCK(curthread, joiner); if (joiner->join_status.thread == pthread) { /* * Set the return value for the woken thread: */ joiner->join_status.error = ESRCH; joiner->join_status.ret = NULL; joiner->join_status.thread = NULL; _thr_setrunnable_unlocked(joiner); } THR_SCHED_UNLOCK(curthread, joiner); } } /* Return the completion status: */ return (rval); } Index: head/lib/libkse/thread/thr_find_thread.c =================================================================== --- head/lib/libkse/thread/thr_find_thread.c (revision 113660) +++ head/lib/libkse/thread/thr_find_thread.c (revision 113661) @@ -1,100 +1,91 @@ /* * Copyright (c) 2003 Daniel Eischen * Copyright (c) 1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" /* * Find a thread in the linked list of active threads and add a reference * to it. Threads with positive reference counts will not be deallocated * until all references are released. */ int _thr_ref_add(struct pthread *curthread, struct pthread *thread, int include_dead) { kse_critical_t crit; struct pthread *pthread; if (thread == NULL) /* Invalid thread: */ return (EINVAL); crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(pthread, &_thread_list, tle) { if (pthread == thread) { if ((include_dead == 0) && ((pthread->state == PS_DEAD) || ((pthread->state == PS_DEADLOCK) || ((pthread->flags & THR_FLAGS_EXITING) != 0)))) pthread = NULL; else { thread->refcount++; curthread->critical_count++; } break; } } KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); /* Return zero if the thread exists: */ return ((pthread != NULL) ? 0 : ESRCH); } void _thr_ref_delete(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; if (thread != NULL) { crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); thread->refcount--; curthread->critical_count--; - if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) && - (thread->refcount == 0) && - ((thread->attr.flags & PTHREAD_DETACHED) != 0)) { - THR_LIST_REMOVE(thread); - THR_GCLIST_ADD(thread); - _gc_check = 1; - if (KSE_WAITING(_kse_initial)) - KSE_WAKEUP(_kse_initial); - } KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } } Index: head/lib/libkse/thread/thr_init.c =================================================================== --- head/lib/libkse/thread/thr_init.c (revision 113660) +++ head/lib/libkse/thread/thr_init.c (revision 113661) @@ -1,519 +1,519 @@ /* - * Copyright (c) 2003 Daniel M. Eischen + * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
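
[Editor's note: _thr_ref_add() and _thr_ref_delete() in thr_find_thread.c above bracket any use of another thread's structure so it cannot be reclaimed underneath the caller; pthread_detach() and pthread_join() in this commit follow exactly this pattern. A hypothetical caller, sketched against the prototypes above.]

#include "thr_private.h"

static int
operate_on_thread(struct pthread *curthread, struct pthread *target)
{
	int ret;

	/* Take a reference; fails with ESRCH if the thread is gone. */
	if ((ret = _thr_ref_add(curthread, target, /* include_dead */ 0)) != 0)
		return (ret);
	/* ... it is now safe to examine or lock 'target' ... */
	_thr_ref_delete(curthread, target);	/* drop the reference */
	return (0);
}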
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" #include "ksd.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); static void init_private(void); static void init_main_thread(struct pthread *thread); /* * All weak references used within libc should be in this table. * This is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; #define DUAL_ENTRY(entry) \ (pthread_func_t)entry, (pthread_func_t)entry static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */ {(pthread_func_t)__pthread_cond_wait, (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ {(pthread_func_t)__pthread_mutex_lock, (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ {(pthread_func_t)__pthread_mutex_trylock, (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */ {DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */ }; static int init_once = 0; /* * Threaded process initialization. * * This is only called under two conditions: * * 1) Some thread routines have detected that the library hasn't yet * been initialized (_thr_initial == NULL && curthread == NULL), or * * 2) An explicit call to reinitialize after a fork (indicated * by curthread != NULL) */ void _libpthread_init(struct pthread *curthread) { int fd; /* Check if this function has already been called: */ if ((_thr_initial != NULL) && (curthread == NULL)) /* Only initialize the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. */ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if ((_thr_pid = getpid()) == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); } /* Initialize pthread private data. */ init_private(); _kse_init(); /* Initialize the initial kse and kseg. */ _kse_initial = _kse_alloc(NULL); if (_kse_initial == NULL) PANIC("Can't allocate initial kse."); _kse_initial->k_kseg = _kseg_alloc(NULL); if (_kse_initial->k_kseg == NULL) PANIC("Can't allocate initial kseg."); _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq; /* Set the initial thread. */ if (curthread == NULL) { /* Create and initialize the initial thread. */ curthread = _thr_alloc(NULL); if (curthread == NULL) PANIC("Can't allocate initial thread"); _thr_initial = curthread; init_main_thread(curthread); } else { /* * The initial thread is the current thread. It is * assumed that the current thread is already initialized * because it is left over from a fork(). */ _thr_initial = curthread; } - _kse_initial->k_kseg->kg_threadcount = 1; + _kse_initial->k_kseg->kg_threadcount = 0; _thr_initial->kse = _kse_initial; _thr_initial->kseg = _kse_initial->k_kseg; _thr_initial->active = 1; /* * Add the thread to the thread list and to the KSEG's thread * queue. */ THR_LIST_ADD(_thr_initial); - TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle); + KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0) PANIC("Can't set initial KSE specific data"); _set_curkse(_thr_initial->kse); _thr_initial->kse->k_curthread = _thr_initial; _thr_initial->kse->k_flags |= KF_INITIALIZED; _kse_initial->k_curthread = _thr_initial; } /* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { int i; /* Zero the initial thread structure. */ memset(thread, 0, sizeof(struct pthread)); /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; /* * Set up the thread stack. * * Create a red zone below the main stack. All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap((void *)_usrstack - THR_STACK_INITIAL - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. 
*/ thread->attr.stackaddr_attr = (void *)_usrstack - THR_STACK_INITIAL; thread->attr.stacksize_attr = THR_STACK_INITIAL; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->slice_usec = -1; thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; thread->name = strdup("initial thread"); /* Initialize the thread for signals: */ sigemptyset(&thread->sigmask); /* * Set up the thread mailbox. The threads saved context * is also in the mailbox. */ thread->tmbx.tm_udata = thread; thread->tmbx.tm_context.uc_sigmask = thread->sigmask; thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; thread->active_priority = THR_DEFAULT_PRIORITY; thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); /* Initialize thread locking. */ if (_lock_init(&thread->lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize initial thread lock"); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_init(&thread->lockusers[i], (void *)thread); _LCK_SET_PRIVATE2(&thread->lockusers[i], (void *)thread); } /* Initialize hooks in the thread structure: */ thread->specific = NULL; thread->cleanup = NULL; thread->flags = 0; thread->continuation = NULL; thread->state = PS_RUNNING; thread->uniqueid = 0; } static void init_private(void) { struct clockinfo clockinfo; struct sigaction act; size_t len; int mib[2]; int i; /* * Avoid reinitializing some things if they don't need to be, * e.g. after a fork(). */ if (init_once == 0) { /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) PANIC("Cannot get kern.usrstack from sysctl"); /* * Create a red zone below the main stack. All other * stacks are constrained to a maximum size by the * parameters passed to mmap(), but this stack is only * limited by resource limits, so this stack needs an * explicitly mapped red zone to protect the thread stack * that is just beyond. */ if (mmap((void *)_usrstack - THR_STACK_INITIAL - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick; else _clock_res_usec = CLOCK_RES_USEC; _thr_page_size = getpagesize(); _thr_guard_default = _thr_page_size; init_once = 1; /* Don't do this again. */ } else { /* * Destroy the locks before creating them. We don't * know what state they are in so it is better to just * recreate them. */ _lock_destroy(&_thread_signal_lock); _lock_destroy(&_mutex_static_lock); _lock_destroy(&_rwlock_static_lock); _lock_destroy(&_keytable_lock); } /* Initialize everything else. 
*/ TAILQ_INIT(&_thread_list); TAILQ_INIT(&_thread_gc_list); /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for SIGINFO. It isn't * really needed, but it is nice to have for debugging * purposes. */ if (__sys_sigaction(SIGINFO, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ PANIC("Cannot initialize signal handler"); } _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART; /* * Initialize the lock for temporary installation of signal * handlers (to support sigwait() semantics) and for the * process signal mask and pending signal sets. */ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize _thread_signal_lock"); if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize mutex static init lock"); if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize rwlock static init lock"); if (_lock_init(&_keytable_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread specific keytable lock"); /* Clear pending signals and get the process signal mask. */ sigemptyset(&_thr_proc_sigpending); __sys_sigprocmask(SIG_SETMASK, NULL, &_thr_proc_sigmask); /* * _thread_list_lock and _kse_count are initialized * by _kse_init() */ } Index: head/lib/libkse/thread/thr_join.c =================================================================== --- head/lib/libkse/thread/thr_join.c (revision 113660) +++ head/lib/libkse/thread/thr_join.c (revision 113661) @@ -1,132 +1,149 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
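
[Editor's note: init_main_thread() and init_private() above obtain the top of the main stack from the kern.usrstack sysctl and then mmap() an inaccessible red zone just below the main thread's stack region. A self-contained sketch of that technique; the two size constants are made-up stand-ins for THR_STACK_INITIAL and _thr_guard_default.]

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <stdio.h>

#define	MAIN_STACK_SIZE	(1024 * 1024)	/* stand-in for THR_STACK_INITIAL */
#define	GUARD_SIZE	4096		/* stand-in for _thr_guard_default */

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_USRSTACK };
	char *usrstack;
	size_t len = sizeof(usrstack);

	/* Find the top of the main thread's stack. */
	if (sysctl(mib, 2, &usrstack, &len, NULL, 0) == -1) {
		perror("sysctl");
		return (1);
	}
	/* Map an inaccessible red zone just below the main stack region. */
	if (mmap(usrstack - MAIN_STACK_SIZE - GUARD_SIZE, GUARD_SIZE,
	    PROT_NONE, MAP_ANON, -1, 0) == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	printf("stack top %p, red zone ends at %p\n",
	    (void *)usrstack, (void *)(usrstack - MAIN_STACK_SIZE));
	return (0);
}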
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" __weak_reference(_pthread_join, pthread_join); int _pthread_join(pthread_t pthread, void **thread_return) { - struct pthread *curthread = _get_curthread(); - int ret = 0; + struct pthread *curthread = _get_curthread(); + kse_critical_t crit; + int ret = 0; _thr_enter_cancellation_point(curthread); /* Check if the caller has specified an invalid thread: */ if (pthread == NULL || pthread->magic != THR_MAGIC) { /* Invalid thread: */ _thr_leave_cancellation_point(curthread); return (EINVAL); } /* Check if the caller has specified itself: */ if (pthread == curthread) { /* Avoid a deadlock condition: */ _thr_leave_cancellation_point(curthread); return (EDEADLK); } /* * Find the thread in the list of active threads or in the * list of dead threads: */ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) { /* Return an error: */ _thr_leave_cancellation_point(curthread); return (ESRCH); } /* Check if this thread has been detached: */ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { /* Remove the reference and return an error: */ _thr_ref_delete(curthread, pthread); ret = ESRCH; } else { /* Lock the target thread while checking its state. */ THR_SCHED_LOCK(curthread, pthread); if ((pthread->state == PS_DEAD) || ((pthread->flags & THR_FLAGS_EXITING) != 0)) { if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; - /* Unlock the thread and remove the reference. */ + /* Detach the thread. */ + pthread->attr.flags |= PTHREAD_DETACHED; + + /* Unlock the thread. */ THR_SCHED_UNLOCK(curthread, pthread); + + /* + * Remove the thread from the list of active + * threads and add it to the GC list. + */ + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + THR_LIST_REMOVE(pthread); + THR_GCLIST_ADD(pthread); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); + + /* Remove the reference. */ _thr_ref_delete(curthread, pthread); } else if (pthread->joiner != NULL) { /* Unlock the thread and remove the reference. */ THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); /* Multiple joiners are not supported. */ ret = ENOTSUP; } else { /* Set the running thread to be the joiner: */ pthread->joiner = curthread; /* Keep track of which thread we're joining to: */ curthread->join_status.thread = pthread; /* Unlock the thread and remove the reference. 
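 * The join handshake, in sketch form (not the literal code below):
 *
 *	pthread->joiner = curthread;
 *	curthread->join_status.thread = pthread;
 *	THR_SET_STATE(curthread, PS_JOIN);
 *	while (curthread->join_status.thread == pthread)
 *		_thr_sched_switch(curthread);
 *	ret = curthread->join_status.error;
 *
 * The exiting thread's thr_cleanup() is what terminates the loop: it
 * copies the exit value into the joiner's join_status, clears
 * join_status.thread and makes the joiner runnable again.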
*/ THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); THR_SCHED_LOCK(curthread, curthread); if (curthread->join_status.thread == pthread) THR_SET_STATE(curthread, PS_JOIN); THR_SCHED_UNLOCK(curthread, curthread); while (curthread->join_status.thread == pthread) { /* Schedule the next thread: */ _thr_sched_switch(curthread); } /* * The thread return value and error are set by the * thread we're joining to when it exits or detaches: */ ret = curthread->join_status.error; if ((ret == 0) && (thread_return != NULL)) *thread_return = curthread->join_status.ret; } } _thr_leave_cancellation_point(curthread); /* Return the completion status: */ return (ret); } Index: head/lib/libkse/thread/thr_kern.c =================================================================== --- head/lib/libkse/thread/thr_kern.c (revision 113660) +++ head/lib/libkse/thread/thr_kern.c (revision 113661) @@ -1,1804 +1,1850 @@ /* * Copyright (C) 2003 Daniel M. Eischen * Copyright (C) 2002 Jonathon Mini * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include __FBSDID("$FreeBSD"); #include #include #include #include #include #include #include #include #include #include #include #include #include "atomic_ops.h" #include "thr_private.h" #include "pthread_md.h" #include "libc_private.h" /*#define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* * Define a high water mark for the maximum number of threads that * will be cached. Once this level is reached, any extra threads * will be free()'d. * * XXX - It doesn't make sense to worry about the maximum number of * KSEs that we can cache because the system will limit us to * something *much* less than the maximum number of threads * that we can have. Disregarding KSEs in their own group, * the maximum number of KSEs is the number of processors in * the system. 
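 * In practice this means exited thread structures are kept on
 * free_threadq and handed out again by _thr_alloc(); only when
 * free_thread_count reaches MAX_CACHED_THREADS does _thr_free()
 * return a structure to free() (see _thr_alloc()/_thr_free() at the
 * end of this file).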
*/ #define MAX_CACHED_THREADS 100 #define KSE_STACKSIZE 16384 #define KSE_SET_MBOX(kse, thrd) \ (kse)->k_mbx.km_curthread = &(thrd)->tmbx #define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED /* - * Add/remove threads from a KSE's scheduling queue. - * For now the scheduling queue is hung off the KSEG. - */ -#define KSEG_THRQ_ADD(kseg, thr) \ - TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle) -#define KSEG_THRQ_REMOVE(kseg, thr) \ - TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle) - - -/* * Macros for manipulating the run queues. The priority queue * routines use the thread's pqe link and also handle the setting * and clearing of the thread's THR_FLAGS_IN_RUNQ flag. */ #define KSE_RUNQ_INSERT_HEAD(kse, thrd) \ _pq_insert_head(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_INSERT_TAIL(kse, thrd) \ _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_REMOVE(kse, thrd) \ _pq_remove(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq) /* * We've got to keep track of everything that is allocated, not only * to have a speedy free list, but also so they can be deallocated * after a fork(). */ static TAILQ_HEAD(, kse) active_kseq; static TAILQ_HEAD(, kse) free_kseq; static TAILQ_HEAD(, kse_group) free_kse_groupq; static TAILQ_HEAD(, kse_group) active_kse_groupq; +static TAILQ_HEAD(, kse_group) gc_ksegq; static struct lock kse_lock; /* also used for kseg queue */ static int free_kse_count = 0; static int free_kseg_count = 0; static TAILQ_HEAD(, pthread) free_threadq; static struct lock thread_lock; static int free_thread_count = 0; static int inited = 0; static int active_kse_count = 0; static int active_kseg_count = 0; static void kse_check_completed(struct kse *kse); static void kse_check_waitq(struct kse *kse); static void kse_check_signals(struct kse *kse); static void kse_entry(struct kse_mailbox *mbx); static void kse_fini(struct kse *curkse); static void kse_sched_multi(struct kse *curkse); static void kse_sched_single(struct kse *curkse); static void kse_switchout_thread(struct kse *kse, struct pthread *thread); static void kse_wait(struct kse *kse); +static void kse_free_unlocked(struct kse *kse); static void kseg_free(struct kse_group *kseg); static void kseg_init(struct kse_group *kseg); static void kse_waitq_insert(struct pthread *thread); static void thr_cleanup(struct kse *kse, struct pthread *curthread); -static void thr_gc(struct kse *curkse); +#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp); +#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf); static int thr_timedout(struct pthread *thread, struct timespec *curtime); /* * This is called after a fork(). * No locks need to be taken here since we are guaranteed to be * single threaded. */ void _kse_single_thread(struct pthread *curthread) { struct kse *kse, *kse_next; struct kse_group *kseg, *kseg_next; struct pthread *thread, *thread_next; kse_critical_t crit; int i; /* * Disable upcalls and clear the threaded flag. * XXX - I don't think we need to disable upcalls after a fork(). * but it doesn't hurt. */ crit = _kse_critical_enter(); __isthreaded = 0; /* * Enter a loop to remove and free all threads other than * the running thread from the active thread list: */ for (thread = TAILQ_FIRST(&_thread_list); thread != NULL; thread = thread_next) { /* * Advance to the next thread before the destroying * the current thread. 
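 * (The usual safe-iteration idiom: TAILQ_NEXT() has to be taken
 * before TAILQ_REMOVE() and free() invalidate the current element.)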
*/ thread_next = TAILQ_NEXT(thread, tle); /* * Remove this thread from the list (the current * thread will be removed but re-added by libpthread * initialization. */ TAILQ_REMOVE(&_thread_list, thread, tle); /* Make sure this isn't the running thread: */ if (thread != curthread) { _thr_stack_free(&thread->attr); if (thread->specific != NULL) free(thread->specific); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_destroy(&thread->lockusers[i]); } _lock_destroy(&thread->lock); free(thread); } } TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */ curthread->joiner = NULL; /* no joining threads yet */ sigemptyset(&curthread->sigpend); /* clear pending signals */ if (curthread->specific != NULL) { free(curthread->specific); curthread->specific = NULL; curthread->specific_data_count = 0; } /* Free the free KSEs: */ while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) { TAILQ_REMOVE(&free_kseq, kse, k_qe); _ksd_destroy(&kse->k_ksd); free(kse); } free_kse_count = 0; /* Free the active KSEs: */ for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) { kse_next = TAILQ_NEXT(kse, k_qe); TAILQ_REMOVE(&active_kseq, kse, k_qe); for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_destroy(&kse->k_lockusers[i]); } _lock_destroy(&kse->k_lock); free(kse); } active_kse_count = 0; /* Free the free KSEGs: */ while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) { TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe); _lock_destroy(&kseg->kg_lock); + _pq_free(&kseg->kg_schedq.sq_runq); free(kseg); } free_kseg_count = 0; /* Free the active KSEGs: */ for (kseg = TAILQ_FIRST(&active_kse_groupq); kseg != NULL; kseg = kseg_next) { kseg_next = TAILQ_NEXT(kseg, kg_qe); TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe); _lock_destroy(&kseg->kg_lock); + _pq_free(&kseg->kg_schedq.sq_runq); free(kseg); } active_kseg_count = 0; /* Free the free threads. */ while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) { TAILQ_REMOVE(&free_threadq, thread, tle); if (thread->specific != NULL) free(thread->specific); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_destroy(&thread->lockusers[i]); } _lock_destroy(&thread->lock); free(thread); } free_thread_count = 0; /* Free the to-be-gc'd threads. */ while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) { - TAILQ_REMOVE(&_thread_gc_list, thread, tle); + TAILQ_REMOVE(&_thread_gc_list, thread, gcle); free(thread); } + TAILQ_INIT(&gc_ksegq); + _gc_count = 0; if (inited != 0) { /* * Destroy these locks; they'll be recreated to assure they * are in the unlocked state. */ _lock_destroy(&kse_lock); _lock_destroy(&thread_lock); _lock_destroy(&_thread_list_lock); inited = 0; } /* * After a fork(), the leftover thread goes back to being * scope process. */ curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM; curthread->attr.flags |= PTHREAD_SCOPE_PROCESS; /* * After a fork, we are still operating on the thread's original * stack. Don't clear the THR_FLAGS_USER from the thread's * attribute flags. */ /* Initialize the threads library. */ curthread->kse = NULL; curthread->kseg = NULL; _kse_initial = NULL; _libpthread_init(curthread); } /* * This is used to initialize housekeeping and to initialize the * KSD for the KSE. 
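 * Concretely, it sets up the active/free KSE and KSE group queues,
 * the GC KSE group queue, and the kse_lock, thread_lock and
 * _thread_list_lock locks.  The whole block is guarded by the
 * 'inited' flag, so calling it again (e.g. after a fork(), once
 * _kse_single_thread() has cleared the flag) is safe.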
*/ void _kse_init(void) { if (inited == 0) { TAILQ_INIT(&active_kseq); TAILQ_INIT(&active_kse_groupq); TAILQ_INIT(&free_kseq); TAILQ_INIT(&free_kse_groupq); TAILQ_INIT(&free_threadq); + TAILQ_INIT(&gc_ksegq); if (_lock_init(&kse_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize free KSE queue lock"); if (_lock_init(&thread_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize free thread queue lock"); if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize thread list lock"); active_kse_count = 0; active_kseg_count = 0; + _gc_count = 0; inited = 1; } } int _kse_isthreaded(void) { return (__isthreaded != 0); } /* * This is called when the first thread (other than the initial * thread) is created. */ void _kse_setthreaded(int threaded) { if ((threaded != 0) && (__isthreaded == 0)) { /* * Locking functions in libc are required when there are * threads other than the initial thread. */ __isthreaded = 1; /* * Tell the kernel to create a KSE for the initial thread * and enable upcalls in it. */ kse_create(&_kse_initial->k_mbx, 0); KSE_SET_MBOX(_kse_initial, _thr_initial); } } /* * Lock wait and wakeup handlers for KSE locks. These are only used by * KSEs, and should never be used by threads. KSE locks include the * KSE group lock (used for locking the scheduling queue) and the * kse_lock defined above. * * When a KSE lock attempt blocks, the entire KSE blocks allowing another * KSE to run. For the most part, it doesn't make much sense to try and * schedule another thread because you need to lock the scheduling queue * in order to do that. And since the KSE lock is used to lock the scheduling * queue, you would just end up blocking again. */ void _kse_lock_wait(struct lock *lock, struct lockuser *lu) { struct kse *curkse = (struct kse *)_LCK_GET_PRIVATE(lu); struct timespec ts; kse_critical_t crit; /* * Enter a loop to wait until we get the lock. */ ts.tv_sec = 0; ts.tv_nsec = 1000000; /* 1 sec */ KSE_SET_WAIT(curkse); while (_LCK_BUSY(lu)) { /* * Yield the kse and wait to be notified when the lock * is granted. */ crit = _kse_critical_enter(); __sys_nanosleep(&ts, NULL); _kse_critical_leave(crit); /* * Make sure that the wait flag is set again in case * we wokeup without the lock being granted. */ KSE_SET_WAIT(curkse); } KSE_CLEAR_WAIT(curkse); } void _kse_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct kse *curkse; struct kse *kse; curkse = _get_curkse(); kse = (struct kse *)_LCK_GET_PRIVATE(lu); if (kse == curkse) PANIC("KSE trying to wake itself up in lock"); else if (KSE_WAITING(kse)) { /* * Notify the owning kse that it has the lock. */ KSE_WAKEUP(kse); } } /* * Thread wait and wakeup handlers for thread locks. These are only used * by threads, never by KSEs. Thread locks include the per-thread lock * (defined in its structure), and condition variable and mutex locks. */ void _thr_lock_wait(struct lock *lock, struct lockuser *lu) { struct pthread *curthread = (struct pthread *)lu->lu_private; int count; /* * Spin for a bit. * * XXX - We probably want to make this a bit smarter. It * doesn't make sense to spin unless there is more * than 1 CPU. A thread that is holding one of these * locks is prevented from being swapped out for another * thread within the same scheduling entity. 
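 * The code below is a simple spin-then-block: spin for a bounded
 * number of iterations (300) hoping the lock holder releases it,
 * then take our own scheduling lock, re-check the lock under it and,
 * if it is still busy, set need_wakeup, mark ourselves PS_LOCKWAIT
 * and switch out.  _thr_lock_wakeup() later clears need_wakeup and
 * makes the thread runnable again.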
*/ count = 0; while (_LCK_BUSY(lu) && count < 300) count++; while (_LCK_BUSY(lu)) { THR_SCHED_LOCK(curthread, curthread); if (_LCK_BUSY(lu)) { /* Wait for the lock: */ atomic_store_rel_int(&curthread->need_wakeup, 1); THR_SET_STATE(curthread, PS_LOCKWAIT); THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); } else THR_SCHED_UNLOCK(curthread, curthread); } } void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct pthread *thread; struct pthread *curthread; curthread = _get_curthread(); thread = (struct pthread *)_LCK_GET_PRIVATE(lu); THR_SCHED_LOCK(curthread, thread); _thr_setrunnable_unlocked(thread); atomic_store_rel_int(&thread->need_wakeup, 0); THR_SCHED_UNLOCK(curthread, thread); } kse_critical_t _kse_critical_enter(void) { kse_critical_t crit; crit = _ksd_readandclear_tmbx; return (crit); } void _kse_critical_leave(kse_critical_t crit) { struct pthread *curthread; _ksd_set_tmbx(crit); if ((crit != NULL) && ((curthread = _get_curthread()) != NULL)) THR_YIELD_CHECK(curthread); } void _thr_critical_enter(struct pthread *thread) { thread->critical_count++; } void _thr_critical_leave(struct pthread *thread) { thread->critical_count--; THR_YIELD_CHECK(thread); } /* * XXX - We may need to take the scheduling lock before calling * this, or perhaps take the lock within here before * doing anything else. */ void _thr_sched_switch(struct pthread *curthread) { struct pthread_sigframe psf; kse_critical_t crit; struct kse *curkse; volatile int once = 0; /* We're in the scheduler, 5 by 5: */ crit = _kse_critical_enter(); curkse = _get_curkse(); curthread->need_switchout = 1; /* The thread yielded on its own. */ curthread->critical_yield = 0; /* No need to yield anymore. */ curthread->slice_usec = -1; /* Restart the time slice. */ /* * The signal frame is allocated off the stack because * a thread can be interrupted by other signals while * it is running down pending signals. */ sigemptyset(&psf.psf_sigset); curthread->curframe = &psf; _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx); /* * This thread is being resumed; check for cancellations. */ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) { once = 1; thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf); } } /* * This is the entry point of the KSE upcall. */ static void kse_entry(struct kse_mailbox *mbx) { struct kse *curkse; /* The kernel should always clear this before making the upcall. */ assert(mbx->km_curthread == NULL); curkse = (struct kse *)mbx->km_udata; /* Check for first time initialization: */ if ((curkse->k_flags & KF_INITIALIZED) == 0) { /* Setup this KSEs specific data. */ _ksd_setprivate(&curkse->k_ksd); _set_curkse(curkse); /* Set this before grabbing the context. */ curkse->k_flags |= KF_INITIALIZED; } /* Avoid checking the type of KSE more than once. */ if ((curkse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) { curkse->k_mbx.km_func = (void *)kse_sched_single; kse_sched_single(curkse); } else { curkse->k_mbx.km_func = (void *)kse_sched_multi; kse_sched_multi(curkse); } } /* * This is the scheduler for a KSE which runs a scope system thread. * The multi-thread KSE scheduler should also work for a single threaded * KSE, but we use a separate scheduler so that it can be fine-tuned * to be more efficient (and perhaps not need a separate stack for * the KSE, allowing it to use the thread's stack). * * XXX - This probably needs some work. 
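 * In outline: the single thread is either checked for a timeout (when
 * it is waiting) or switched out according to its new state (the KSE
 * exits on PS_DEAD; waits that can time out go onto the wait queue),
 * and the KSE then sleeps in kse_wait() until the thread is
 * PS_RUNNING again before switching back into it.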
*/ static void kse_sched_single(struct kse *curkse) { struct pthread *curthread; struct timespec ts; int level; /* This may have returned from a kse_release(). */ if (KSE_WAITING(curkse)) KSE_CLEAR_WAIT(curkse); curthread = curkse->k_curthread; if (curthread->active == 0) { if (curthread->state != PS_RUNNING) { /* Check to see if the thread has timed out. */ KSE_GET_TOD(curkse, &ts); if (thr_timedout(curthread, &ts) != 0) { curthread->timeout = 1; curthread->state = PS_RUNNING; } } } else if (curthread->need_switchout != 0) { /* * This has to do the job of kse_switchout_thread(), only * for a single threaded KSE/KSEG. */ /* This thread no longer needs to yield the CPU: */ curthread->critical_yield = 0; curthread->need_switchout = 0; /* * Lock the scheduling queue. * * There is no scheduling queue for single threaded KSEs, * but we need a lock for protection regardless. */ KSE_SCHED_LOCK(curkse, curkse->k_kseg); switch (curthread->state) { case PS_DEAD: /* Unlock the scheduling queue and exit the KSE. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_fini(curkse); /* does not return */ break; case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Only insert threads that can timeout: */ if (curthread->wakeup_time.tv_sec != -1) { /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(curkse, curthread); } break; case PS_LOCKWAIT: level = curthread->locklevel - 1; if (_LCK_BUSY(&curthread->lockusers[level])) KSE_WAITQ_INSERT(curkse, curthread); else THR_SET_STATE(curthread, PS_RUNNING); break; case PS_JOIN: case PS_MUTEX_WAIT: case PS_RUNNING: case PS_SIGSUSPEND: case PS_SIGWAIT: case PS_SUSPENDED: case PS_DEADLOCK: default: /* * These states don't timeout and don't need * to be in the waiting queue. */ break; } if (curthread->state != PS_RUNNING) curthread->active = 0; } while (curthread->state != PS_RUNNING) { kse_wait(curkse); } /* Remove the frame reference. */ curthread->curframe = NULL; /* Unlock the scheduling queue. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * Continue the thread at its current frame: */ _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); } void dump_queues(struct kse *curkse) { struct pthread *thread; DBG_MSG("Threads in waiting queue:\n"); TAILQ_FOREACH(thread, &curkse->k_kseg->kg_schedq.sq_waitq, pqe) { DBG_MSG(" thread %p, state %d, blocked %d\n", thread, thread->state, thread->blocked); } } /* * This is the scheduler for a KSE which runs multiple threads. */ static void kse_sched_multi(struct kse *curkse) { struct pthread *curthread; struct pthread_sigframe *curframe; int ret; /* This may have returned from a kse_release(). */ if (KSE_WAITING(curkse)) KSE_CLEAR_WAIT(curkse); /* Lock the scheduling lock. */ KSE_SCHED_LOCK(curkse, curkse->k_kseg); /* * If the current thread was completed in another KSE, then * it will be in the run queue. Don't mark it as being blocked. */ if (((curthread = curkse->k_curthread) != NULL) && ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) && (curthread->need_switchout == 0)) { /* * Assume the current thread is blocked; when the * completed threads are checked and if the current * thread is among the completed, the blocked flag * will be cleared. */ curthread->blocked = 1; } /* Check for any unblocked threads in the kernel. */ kse_check_completed(curkse); /* * Check for threads that have timed-out. */ kse_check_waitq(curkse); /* * Switchout the current thread, if necessary, as the last step * so that it is inserted into the run queue (if it's runnable) * _after_ any other threads that were added to it above. 
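 * Doing the switchout last keeps the ordering fair: threads woken by
 * kse_check_completed() and kse_check_waitq() are already in the run
 * queue, so a still-runnable current thread is queued behind them
 * rather than ahead of them.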
*/ if (curthread == NULL) ; /* Nothing to do here. */ else if ((curthread->need_switchout == 0) && (curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) { /* * Resume the thread and tell it to yield when * it leaves the critical region. */ curthread->critical_yield = 0; curthread->active = 1; if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0) KSE_RUNQ_REMOVE(curkse, curthread); curkse->k_curthread = curthread; curthread->kse = curkse; KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); DBG_MSG("Continuing thread %p in critical region\n", curthread); ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); if (ret != 0) PANIC("Can't resume thread in critical region\n"); } else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) kse_switchout_thread(curkse, curthread); curkse->k_curthread = NULL; /* This has to be done without the scheduling lock held. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); - - /* Check for GC: */ - if (_gc_check != 0) - thr_gc(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); dump_queues(curkse); /* Check if there are no threads ready to run: */ while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) && (curkse->k_kseg->kg_threadcount != 0)) { /* * Wait for a thread to become active or until there are * no more threads. */ kse_wait(curkse); kse_check_waitq(curkse); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); - if (_gc_check != 0) - thr_gc(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); } /* Check for no more threads: */ if (curkse->k_kseg->kg_threadcount == 0) { /* * Normally this shouldn't return, but it will if there * are other KSEs running that create new threads that * are assigned to this KSE[G]. For instance, if a scope * system thread were to create a scope process thread * and this kse[g] is the initial kse[g], then that newly * created thread would be assigned to us (the initial * kse[g]). */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_fini(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); curthread = KSE_RUNQ_FIRST(curkse); } THR_ASSERT(curthread != NULL, "Return from kse_wait/fini without thread."); THR_ASSERT(curthread->state != PS_DEAD, "Trying to resume dead thread!"); KSE_RUNQ_REMOVE(curkse, curthread); /* * Make the selected thread the current thread. */ curkse->k_curthread = curthread; /* * Make sure the current thread's kse points to this kse. */ curthread->kse = curkse; /* * Reset accounting. */ curthread->tmbx.tm_uticks = 0; curthread->tmbx.tm_sticks = 0; /* * Reset the time slice if this thread is running for the first * time or running again after using its full time slice allocation. */ if (curthread->slice_usec == -1) curthread->slice_usec = 0; /* Mark the thread active. */ curthread->active = 1; /* Remove the frame reference. */ curframe = curthread->curframe; curthread->curframe = NULL; /* Unlock the scheduling queue: */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * The thread's current signal frame will only be NULL if it * is being resumed after being blocked in the kernel. In * this case, and if the thread needs to run down pending * signals or needs a cancellation check, we need to add a * signal frame to the thread's context. 
*/ -#if 0 +#ifdef NOT_YET if ((curframe == NULL) && ((curthread->check_pending != 0) || (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) { signalcontext(&curthread->tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); } #endif /* * Continue the thread at its current frame: */ DBG_MSG("Continuing thread %p\n", curthread); ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); if (ret != 0) PANIC("Thread has returned from _thread_switch"); /* This point should not be reached. */ PANIC("Thread has returned from _thread_switch"); } static void kse_check_signals(struct kse *curkse) { sigset_t sigset; int i; /* Deliver posted signals. */ for (i = 0; i < _SIG_WORDS; i++) { atomic_swap_int(&curkse->k_mbx.km_sigscaught.__bits[i], 0, &sigset.__bits[i]); } if (SIGNOTEMPTY(sigset)) { /* * Dispatch each signal. * * XXX - There is no siginfo for any of these. * I think there should be, especially for * signals from other processes (si_pid, si_uid). */ for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { DBG_MSG("Dispatching signal %d\n", i); _thr_sig_dispatch(curkse, i, NULL /* no siginfo */); } } sigemptyset(&sigset); __sys_sigprocmask(SIG_SETMASK, &sigset, NULL); } } +#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); thr_resume_check(curthread, ucp, NULL); } +#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf) { /* Check signals before cancellations. */ while (curthread->check_pending != 0) { /* Clear the pending flag. */ curthread->check_pending = 0; /* * It's perfectly valid, though not portable, for * signal handlers to munge their interrupted context * and expect to return to it. Ensure we use the * correct context when running down signals. */ _thr_sig_rundown(curthread, ucp, psf); } if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); } /* * Clean up a thread. This must be called with the thread's KSE * scheduling lock held. The thread must be a thread from the * KSE's group. */ static void thr_cleanup(struct kse *curkse, struct pthread *thread) { struct pthread *joiner; - int free_thread = 0; if ((joiner = thread->joiner) != NULL) { thread->joiner = NULL; if ((joiner->state == PS_JOIN) && (joiner->join_status.thread == thread)) { joiner->join_status.thread = NULL; /* Set the return status for the joining thread: */ joiner->join_status.ret = thread->ret; /* Make the thread runnable. */ if (joiner->kseg == curkse->k_kseg) _thr_setrunnable_unlocked(joiner); else { KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); KSE_SCHED_LOCK(curkse, joiner->kseg); _thr_setrunnable_unlocked(joiner); KSE_SCHED_UNLOCK(curkse, joiner->kseg); KSE_SCHED_LOCK(curkse, curkse->k_kseg); } } thread->attr.flags |= PTHREAD_DETACHED; } + if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) { + /* + * Remove the thread from the KSEG's list of threads. + */ + KSEG_THRQ_REMOVE(thread->kseg, thread); + /* + * Migrate the thread to the main KSE so that this + * KSE and KSEG can be cleaned when their last thread + * exits. + */ + thread->kseg = _kse_initial->k_kseg; + thread->kse = _kse_initial; + } thread->flags |= THR_FLAGS_GC_SAFE; - thread->kseg->kg_threadcount--; + + /* + * We can't hold the thread list lock while holding the + * scheduler lock. 
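+ * The resulting lock order is: drop the KSE group's scheduling lock,
+ * take _thread_list_lock just long enough to move the thread onto
+ * the GC list, release it, and only then re-acquire the scheduling
+ * lock before returning to the scheduler.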
+ */ + KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); + DBG_MSG("Adding thread %p to GC list\n", thread); KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); - _thr_stack_free(&thread->attr); - if ((thread->attr.flags & PTHREAD_DETACHED) != 0) { - /* Remove this thread from the list of all threads: */ - THR_LIST_REMOVE(thread); - if (thread->refcount == 0) { - THR_GCLIST_REMOVE(thread); - TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle); - free_thread = 1; - } - } + THR_GCLIST_ADD(thread); KSE_LOCK_RELEASE(curkse, &_thread_list_lock); - if (free_thread != 0) - _thr_free(curkse, thread); + KSE_SCHED_LOCK(curkse, curkse->k_kseg); } void -thr_gc(struct pthread *curthread) +_thr_gc(struct pthread *curthread) { - struct pthread *td, *joiner; - struct kse_group *free_kseg; + struct pthread *td, *td_next; + kse_critical_t crit; + int clean; - _gc_check = 0; - KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); - while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + + /* Check the threads waiting for GC. */ + for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) { + td_next = TAILQ_NEXT(td, gcle); + if ((td->flags & THR_FLAGS_GC_SAFE) == 0) + continue; +#ifdef NOT_YET + else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) && + (td->kse->k_mbx.km_flags == 0)) { + /* + * The thread and KSE are operating on the same + * stack. Wait for the KSE to exit before freeing + * the thread's stack as well as everything else. + */ + continue; + } +#endif THR_GCLIST_REMOVE(td); - clean = (td->attr.flags & PTHREAD_DETACHED) != 0; - KSE_LOCK_RELEASE(curkse, &_thread_list_lock); + clean = ((td->attr.flags & PTHREAD_DETACHED) != 0) && + (td->refcount == 0); + _thr_stack_free(&td->attr); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + DBG_MSG("Found thread %p in GC list, clean? %d\n", td, clean); - KSE_SCHED_LOCK(curkse, td->kseg); - TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle); - if (TAILQ_EMPTY(&td->kseg->kg_threadq)) - free_kseg = td->kseg; - else - free_kseg = NULL; - joiner = NULL; - if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) && - (td->joiner->join_status.thread == td)) { - joiner = td->joiner; - joiner->join_status.thread = NULL; - - /* Set the return status for the joining thread: */ - joiner->join_status.ret = td->ret; - - /* Make the thread runnable. */ - if (td->kseg == joiner->kseg) { - _thr_setrunnable_unlocked(joiner); - joiner = NULL; - } + if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) { + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + kse_free_unlocked(td->kse); + kseg_free(td->kseg); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); } - td->joiner = NULL; - KSE_SCHED_UNLOCK(curkse, td->kseg); - if (free_kseg != NULL) - kseg_free(free_kseg); - if (joiner != NULL) { - KSE_SCHED_LOCK(curkse, joiner->kseg); - _thr_setrunnable_unlocked(joiner); - KSE_SCHED_LOCK(curkse, joiner->kseg); + if (clean != 0) { + _kse_critical_leave(crit); + _thr_free(curthread, td); + crit = _kse_critical_enter(); } - _thr_free(curkse, td); - KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); } - KSE_LOCK_RELEASE(curkse, &_thread_list_lock); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); } /* * Only new threads that are running or suspended may be scheduled. 
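 * Two cases follow.  A PTHREAD_SCOPE_SYSTEM thread gets its own,
 * not-yet-started KSE/KSEG: it is queued on that KSEG and
 * kse_create() is invoked for its mailbox.  A scope-process thread is
 * added under the target KSEG's scheduling lock; its KSE is started
 * with kse_create() if KF_STARTED is not yet set, or just woken with
 * KSE_WAKEUP() when the new thread is runnable and the KSE is
 * waiting.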
*/ void _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) { struct kse *curkse; kse_critical_t crit; int need_start; /* * If this is the first time creating a thread, make sure * the mailbox is set for the current thread. */ if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) { /* * No need to lock the scheduling queue since the * KSE/KSEG pair have not yet been started. */ KSEG_THRQ_ADD(newthread->kseg, newthread); if (newthread->state == PS_RUNNING) THR_RUNQ_INSERT_TAIL(newthread); newthread->kseg->kg_threadcount++; /* * This thread needs a new KSE and KSEG. */ crit = _kse_critical_enter(); curkse = _get_curkse(); _ksd_setprivate(&newthread->kse->k_ksd); kse_create(&newthread->kse->k_mbx, 1); _ksd_setprivate(&curkse->k_ksd); _kse_critical_leave(crit); } else { /* * Lock the KSE and add the new thread to its list of * assigned threads. If the new thread is runnable, also * add it to the KSE's run queue. */ need_start = 0; KSE_SCHED_LOCK(curthread->kse, newthread->kseg); KSEG_THRQ_ADD(newthread->kseg, newthread); if (newthread->state == PS_RUNNING) THR_RUNQ_INSERT_TAIL(newthread); newthread->kseg->kg_threadcount++; if ((newthread->kse->k_flags & KF_STARTED) == 0) { /* * This KSE hasn't been started yet. Start it * outside of holding the lock. */ newthread->kse->k_flags |= KF_STARTED; need_start = 1; } KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg); if (need_start != 0) kse_create(&newthread->kse->k_mbx, 0); else if ((newthread->state == PS_RUNNING) && KSE_WAITING(newthread->kse)) { /* * The thread is being scheduled on another KSEG. */ KSE_WAKEUP(newthread->kse); } } } void kse_waitq_insert(struct pthread *thread) { struct pthread *td; if (thread->wakeup_time.tv_sec == -1) TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread, pqe); else { td = TAILQ_FIRST(&thread->kse->k_schedq->sq_waitq); while ((td != NULL) && (td->wakeup_time.tv_sec != -1) && ((td->wakeup_time.tv_sec < thread->wakeup_time.tv_sec) || ((td->wakeup_time.tv_sec == thread->wakeup_time.tv_sec) && (td->wakeup_time.tv_nsec <= thread->wakeup_time.tv_nsec)))) td = TAILQ_NEXT(td, pqe); if (td == NULL) TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread, pqe); else TAILQ_INSERT_BEFORE(td, thread, pqe); } thread->flags |= THR_FLAGS_IN_WAITQ; } /* * This must be called with the scheduling lock held. */ static void kse_check_completed(struct kse *kse) { struct pthread *thread; struct kse_thr_mailbox *completed; if ((completed = kse->k_mbx.km_completed) != NULL) { kse->k_mbx.km_completed = NULL; while (completed != NULL) { thread = completed->tm_udata; DBG_MSG("Found completed thread %p, name %s\n", thread, (thread->name == NULL) ? "none" : thread->name); thread->blocked = 0; if (thread != kse->k_curthread) KSE_RUNQ_INSERT_TAIL(kse, thread); completed = completed->tm_next; } } } /* * This must be called with the scheduling lock held. */ static void kse_check_waitq(struct kse *kse) { struct pthread *pthread; struct timespec ts; KSE_GET_TOD(kse, &ts); /* * Wake up threads that have timedout. This has to be * done before adding the current thread to the run queue * so that a CPU intensive thread doesn't get preference * over waiting threads. 
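 * The wait queue is kept sorted by wakeup time (see
 * kse_waitq_insert()), so the loop below only pops threads from the
 * front for as long as thr_timedout() reports their wakeup time as
 * being at or before the current time of day; a tv_sec of -1 means
 * "no timeout" and such threads are never considered timed out.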
*/ while (((pthread = KSE_WAITQ_FIRST(kse)) != NULL) && thr_timedout(pthread, &ts)) { /* Remove the thread from the wait queue: */ KSE_WAITQ_REMOVE(kse, pthread); DBG_MSG("Found timedout thread %p in waitq\n", pthread); /* Indicate the thread timedout: */ pthread->timeout = 1; /* Add the thread to the priority queue: */ THR_SET_STATE(pthread, PS_RUNNING); KSE_RUNQ_INSERT_TAIL(kse, pthread); } } static int thr_timedout(struct pthread *thread, struct timespec *curtime) { if (thread->wakeup_time.tv_sec < 0) return (0); else if (thread->wakeup_time.tv_sec > curtime->tv_sec) return (0); else if ((thread->wakeup_time.tv_sec == curtime->tv_sec) && (thread->wakeup_time.tv_nsec > curtime->tv_nsec)) return (0); else return (1); } /* * This must be called with the scheduling lock held. * * Each thread has a time slice, a wakeup time (used when it wants * to wait for a specified amount of time), a run state, and an * active flag. * * When a thread gets run by the scheduler, the active flag is * set to non-zero (1). When a thread performs an explicit yield * or schedules a state change, it enters the scheduler and the * active flag is cleared. When the active flag is still seen * set in the scheduler, that means that the thread is blocked in * the kernel (because it is cleared before entering the scheduler * in all other instances). * * The wakeup time is only set for those states that can timeout. * It is set to (-1, -1) for all other instances. * * The thread's run state, aside from being useful when debugging, * is used to place the thread in an appropriate queue. There * are 2 basic queues: * * o run queue - queue ordered by priority for all threads * that are runnable * o waiting queue - queue sorted by wakeup time for all threads * that are not otherwise runnable (not blocked * in kernel, not waiting for locks) * * The thread's time slice is used for round-robin scheduling * (the default scheduling policy). While a SCHED_RR thread * is runnable it's time slice accumulates. When it reaches * the time slice interval, it gets reset and added to the end * of the queue of threads at its priority. When a thread no * longer becomes runnable (blocks in kernel, waits, etc), its * time slice is reset. * * The job of kse_switchout_thread() is to handle all of the above. */ static void kse_switchout_thread(struct kse *kse, struct pthread *thread) { int level; /* * Place the currently running thread into the * appropriate queue(s). */ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state); if (thread->blocked != 0) { /* This thread must have blocked in the kernel. */ /* thread->slice_usec = -1;*/ /* restart timeslice */ /* * XXX - Check for pending signals for this thread to * see if we need to interrupt it in the kernel. */ /* if (thread->check_pending != 0) */ if ((thread->slice_usec != -1) && (thread->attr.sched_policy != SCHED_FIFO)) thread->slice_usec += (thread->tmbx.tm_uticks + thread->tmbx.tm_sticks) * _clock_res_usec; } else { switch (thread->state) { case PS_DEAD: /* * The scheduler is operating on a different * stack. It is safe to do garbage collecting * here. */ thr_cleanup(kse, thread); return; break; case PS_RUNNING: /* Nothing to do here. */ break; case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(kse, thread); break; case PS_LOCKWAIT: /* * This state doesn't timeout. 
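 * A wakeup time of (-1, -1) is the "no timeout" sentinel recognised
 * by kse_waitq_insert() and thr_timedout().  The lock may also have
 * been granted in the meantime, in which case the thread is simply
 * made runnable again instead of being put on the wait queue.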
*/ thread->wakeup_time.tv_sec = -1; thread->wakeup_time.tv_nsec = -1; level = thread->locklevel - 1; if (_LCK_BUSY(&thread->lockusers[level])) KSE_WAITQ_INSERT(kse, thread); else THR_SET_STATE(thread, PS_RUNNING); break; case PS_JOIN: case PS_MUTEX_WAIT: case PS_SIGSUSPEND: case PS_SIGWAIT: case PS_SUSPENDED: case PS_DEADLOCK: default: /* * These states don't timeout. */ thread->wakeup_time.tv_sec = -1; thread->wakeup_time.tv_nsec = -1; /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(kse, thread); break; } if (thread->state != PS_RUNNING) { /* Restart the time slice: */ thread->slice_usec = -1; } else { if (thread->need_switchout != 0) /* * The thread yielded on its own; * restart the timeslice. */ thread->slice_usec = -1; else if ((thread->slice_usec != -1) && (thread->attr.sched_policy != SCHED_FIFO)) { thread->slice_usec += (thread->tmbx.tm_uticks + thread->tmbx.tm_sticks) * _clock_res_usec; /* Check for time quantum exceeded: */ if (thread->slice_usec > TIMESLICE_USEC) thread->slice_usec = -1; } if (thread->slice_usec == -1) { /* * The thread exceeded its time quantum or * it yielded the CPU; place it at the tail * of the queue for its priority. */ KSE_RUNQ_INSERT_TAIL(kse, thread); } else { /* * The thread hasn't exceeded its interval * Place it at the head of the queue for its * priority. */ KSE_RUNQ_INSERT_HEAD(kse, thread); } } } thread->active = 0; thread->need_switchout = 0; } /* * This function waits for the smallest timeout value of any waiting * thread, or until it receives a message from another KSE. * * This must be called with the scheduling lock held. */ static void kse_wait(struct kse *kse) { struct timespec *ts, ts_sleep; struct pthread *td_wait, *td_run; ts = &kse->k_mbx.km_timeofday; KSE_SET_WAIT(kse); td_wait = KSE_WAITQ_FIRST(kse); td_run = KSE_RUNQ_FIRST(kse); KSE_SCHED_UNLOCK(kse, kse->k_kseg); if (td_run == NULL) { if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) { /* Limit sleep to no more than 2 minutes. */ ts_sleep.tv_sec = 120; ts_sleep.tv_nsec = 0; } else { TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, ts); if (ts_sleep.tv_sec > 120) { ts_sleep.tv_sec = 120; ts_sleep.tv_nsec = 0; } } if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) { /* Don't sleep for negative times. */ kse_release(&ts_sleep); /* * The above never returns. * XXX - Actually, it would be nice if it did * for KSE's with only one thread. */ } } KSE_CLEAR_WAIT(kse); } /* * Avoid calling this kse_exit() so as not to confuse it with the * system call of the same name. */ static void kse_fini(struct kse *kse) { struct timespec ts; + struct kse_group *free_kseg = NULL; + if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) + kse_exit(); /* - * Check to see if this is the main kse. + * Check to see if this is one of the main kses. */ - if (kse == _kse_initial) { + else if (kse->k_kseg != _kse_initial->k_kseg) { + /* Remove this KSE from the KSEG's list of KSEs. */ + KSE_SCHED_LOCK(kse, kse->k_kseg); + TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe); + if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq)) + free_kseg = kse->k_kseg; + KSE_SCHED_UNLOCK(kse, kse->k_kseg); + /* + * Add this KSE to the list of free KSEs along with + * the KSEG if is now orphaned. + */ + KSE_LOCK_ACQUIRE(kse, &kse_lock); + if (free_kseg != NULL) + kseg_free(free_kseg); + kse_free_unlocked(kse); + KSE_LOCK_RELEASE(kse, &kse_lock); + kse_exit(); + /* Never returns. 
*/ + } else { + /* * Wait for the last KSE/thread to exit, or for more * threads to be created (it is possible for additional * scope process threads to be created after the main * thread exits). */ ts.tv_sec = 120; ts.tv_nsec = 0; KSE_SET_WAIT(kse); KSE_SCHED_LOCK(kse, kse->k_kseg); if ((active_kse_count > 1) && (kse->k_kseg->kg_threadcount == 0)) { KSE_SCHED_UNLOCK(kse, kse->k_kseg); /* * XXX - We need a way for the KSE to do a timed * wait. */ kse_release(&ts); /* The above never returns. */ } KSE_SCHED_UNLOCK(kse, kse->k_kseg); /* There are no more threads; exit this process: */ if (kse->k_kseg->kg_threadcount == 0) { /* kse_exit(); */ __isthreaded = 0; exit(0); } - } else { - /* Mark this KSE for GC: */ - KSE_LOCK_ACQUIRE(kse, &_thread_list_lock); - TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe); - KSE_LOCK_RELEASE(kse, &_thread_list_lock); - kse_exit(); } } void _thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp) { struct kse *curkse; curkse = _get_curkse(); KSE_SCHED_LOCK(curkse, thread->kseg); /* * A threads assigned KSE can't change out from under us * when we hold the scheduler lock. */ if (THR_IS_ACTIVE(thread)) { /* Thread is active. Can't install the signal for it. */ /* Make a note in the thread that it has a signal. */ sigaddset(&thread->sigpend, sig); thread->check_pending = 1; } else { /* Make a note in the thread that it has a signal. */ sigaddset(&thread->sigpend, sig); thread->check_pending = 1; if (thread->blocked != 0) { /* Tell the kernel to interrupt the thread. */ kse_thr_interrupt(&thread->tmbx); } } KSE_SCHED_UNLOCK(curkse, thread->kseg); } void _thr_set_timeout(const struct timespec *timeout) { struct pthread *curthread = _get_curthread(); struct timespec ts; /* Reset the timeout flag for the running thread: */ curthread->timeout = 0; /* Check if the thread is to wait forever: */ if (timeout == NULL) { /* * Set the wakeup time to something that can be recognised as * different to an actual time of day: */ curthread->wakeup_time.tv_sec = -1; curthread->wakeup_time.tv_nsec = -1; } /* Check if no waiting is required: */ else if ((timeout->tv_sec == 0) && (timeout->tv_nsec == 0)) { /* Set the wake up time to 'immediately': */ curthread->wakeup_time.tv_sec = 0; curthread->wakeup_time.tv_nsec = 0; } else { /* Calculate the time for the current thread to wakeup: */ KSE_GET_TOD(curthread->kse, &ts); TIMESPEC_ADD(&curthread->wakeup_time, &ts, timeout); } } void _thr_panic_exit(char *file, int line, char *msg) { char buf[256]; snprintf(buf, sizeof(buf), "(%s:%d) %s\n", file, line, msg); __sys_write(2, buf, strlen(buf)); abort(); } void _thr_setrunnable(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; crit = _kse_critical_enter(); KSE_SCHED_LOCK(curthread->kse, thread->kseg); _thr_setrunnable_unlocked(thread); KSE_SCHED_UNLOCK(curthread->kse, thread->kseg); _kse_critical_leave(crit); } void _thr_setrunnable_unlocked(struct pthread *thread) { if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) /* No silly queues for these threads. */ THR_SET_STATE(thread, PS_RUNNING); else { if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0) KSE_WAITQ_REMOVE(thread->kse, thread); THR_SET_STATE(thread, PS_RUNNING); if ((thread->blocked == 0) && (thread->flags & THR_FLAGS_IN_RUNQ) == 0) THR_RUNQ_INSERT_TAIL(thread); } /* * XXX - Threads are not yet assigned to specific KSEs; they are * assigned to the KSEG. 
So the fact that a thread's KSE is * waiting doesn't necessarily mean that it will be the KSE * that runs the thread after the lock is granted. But we * don't know if the other KSEs within the same KSEG are * also in a waiting state or not so we err on the side of * caution and wakeup the thread's last known KSE. We * ensure that the threads KSE doesn't change while it's * scheduling lock is held so it is safe to reference it * (the KSE). If the KSE wakes up and doesn't find any more * work it will again go back to waiting so no harm is done. */ if (KSE_WAITING(thread->kse)) KSE_WAKEUP(thread->kse); } struct pthread * _get_curthread(void) { return (_ksd_curthread); } /* This assumes the caller has disabled upcalls. */ struct kse * _get_curkse(void) { return (_ksd_curkse); } void _set_curkse(struct kse *kse) { _ksd_setprivate(&kse->k_ksd); } /* * Allocate a new KSEG. * - * We allow the current KSE (curkse) to be NULL in the case that this + * We allow the current thread to be NULL in the case that this * is the first time a KSEG is being created (library initialization). * In this case, we don't need to (and can't) take any locks. */ struct kse_group * -_kseg_alloc(struct kse *curkse) +_kseg_alloc(struct pthread *curthread) { struct kse_group *kseg = NULL; + kse_critical_t crit; - if ((curkse != NULL) && (free_kseg_count > 0)) { + if ((curthread != NULL) && (free_kseg_count > 0)) { /* Use the kse lock for the kseg queue. */ - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) { TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe); free_kseg_count--; active_kseg_count++; TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe); } - KSE_LOCK_RELEASE(curkse, &kse_lock); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); } /* * If requested, attempt to allocate a new KSE group only if the * KSE allocation was successful and a KSE group wasn't found in * the free list. */ if ((kseg == NULL) && ((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) { - THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq, - THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0, - "Unable to allocate priority queue."); - kseg_init(kseg); - if (curkse != NULL) - KSE_LOCK_ACQUIRE(curkse, &kse_lock); - kseg_free(kseg); - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); + if (_pq_alloc(&kseg->kg_schedq.sq_runq, + THR_MIN_PRIORITY, THR_LAST_PRIORITY) != 0) { + free(kseg); + kseg = NULL; + } else { + kseg_init(kseg); + /* Add the KSEG to the list of active KSEGs. */ + if (curthread != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + active_kseg_count++; + TAILQ_INSERT_TAIL(&active_kse_groupq, + kseg, kg_qe); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } else { + active_kseg_count++; + TAILQ_INSERT_TAIL(&active_kse_groupq, + kseg, kg_qe); + } + } } return (kseg); } /* * This must be called with the kse lock held and when there are * no more threads that reference it. */ static void kseg_free(struct kse_group *kseg) { + TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe); TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe); kseg_init(kseg); free_kseg_count++; active_kseg_count--; } /* * Allocate a new KSE. * - * We allow the current KSE (curkse) to be NULL in the case that this + * We allow the current thread to be NULL in the case that this * is the first time a KSE is being created (library initialization). 
* In this case, we don't need to (and can't) take any locks. */ struct kse * -_kse_alloc(struct kse *curkse) +_kse_alloc(struct pthread *curthread) { struct kse *kse = NULL; + kse_critical_t crit; int need_ksd = 0; int i; - if ((curkse != NULL) && (free_kse_count > 0)) { - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + if ((curthread != NULL) && (free_kse_count > 0)) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); /* Search for a finished KSE. */ kse = TAILQ_FIRST(&free_kseq); #define KEMBX_DONE 0x01 while ((kse != NULL) && ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) { kse = TAILQ_NEXT(kse, k_qe); } #undef KEMBX_DONE if (kse != NULL) { TAILQ_REMOVE(&free_kseq, kse, k_qe); free_kse_count--; active_kse_count++; TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); } - KSE_LOCK_RELEASE(curkse, &kse_lock); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); } if ((kse == NULL) && ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) { bzero(kse, sizeof(*kse)); /* Initialize the lockusers. */ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_init(&kse->k_lockusers[i], (void *)kse); _LCK_SET_PRIVATE2(&kse->k_lockusers[i], NULL); } /* We had to malloc a kse; mark it as needing a new ID.*/ need_ksd = 1; /* * Create the KSE context. * * XXX - For now this is done here in the allocation. * In the future, we may want to have it done * outside the allocation so that scope system * threads (one thread per KSE) are not required * to have a stack for an unneeded kse upcall. */ kse->k_mbx.km_func = kse_entry; kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE); kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE; kse->k_mbx.km_udata = (void *)kse; kse->k_mbx.km_quantum = 20000; if (kse->k_mbx.km_stack.ss_size == NULL) { free(kse); kse = NULL; } } if ((kse != NULL) && (need_ksd != 0)) { /* This KSE needs initialization. */ - if (curkse != NULL) - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + if (curthread != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + } /* Initialize KSD inside of the lock. */ if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) { - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); + if (curthread != NULL) { + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } free(kse->k_mbx.km_stack.ss_sp); for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_destroy(&kse->k_lockusers[i]); } free(kse); return (NULL); } kse->k_flags = 0; active_kse_count++; TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); - + if (curthread != NULL) { + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } } return (kse); } void -_kse_free(struct kse *curkse, struct kse *kse) +kse_free_unlocked(struct kse *kse) { - struct kse_group *kseg = NULL; - - if (curkse == kse) - PANIC("KSE trying to free itself"); - KSE_LOCK_ACQUIRE(curkse, &kse_lock); active_kse_count--; - if ((kseg = kse->k_kseg) != NULL) { - TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe); - /* - * Free the KSEG if there are no more threads associated - * with it. 
- */ - if (TAILQ_EMPTY(&kseg->kg_threadq)) - kseg_free(kseg); - } kse->k_kseg = NULL; kse->k_flags &= ~KF_INITIALIZED; TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe); free_kse_count++; - KSE_LOCK_RELEASE(curkse, &kse_lock); } +void +_kse_free(struct pthread *curthread, struct kse *kse) +{ + kse_critical_t crit; + + if (curthread == NULL) + kse_free_unlocked(kse); + else { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + kse_free_unlocked(kse); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } +} + static void kseg_init(struct kse_group *kseg) { TAILQ_INIT(&kseg->kg_kseq); TAILQ_INIT(&kseg->kg_threadq); TAILQ_INIT(&kseg->kg_schedq.sq_waitq); - TAILQ_INIT(&kseg->kg_schedq.sq_blockedq); _lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup); kseg->kg_threadcount = 0; kseg->kg_idle_kses = 0; kseg->kg_flags = 0; } struct pthread * _thr_alloc(struct pthread *curthread) { kse_critical_t crit; struct pthread *thread = NULL; if (curthread != NULL) { - if (_gc_check != 0) - thread_gc(curthread); + if (GC_NEEDED()) + _thr_gc(curthread); if (free_thread_count > 0) { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curkse, &thread_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) { TAILQ_REMOVE(&free_threadq, thread, tle); free_thread_count--; } - KSE_LOCK_RELEASE(curkse, &thread_lock); + KSE_LOCK_RELEASE(curthread->kse, &thread_lock); } } if (thread == NULL) thread = (struct pthread *)malloc(sizeof(struct pthread)); return (thread); } void _thr_free(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; + DBG_MSG("Freeing thread %p\n", thread); if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) free(thread); else { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curkse, &thread_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); + THR_LIST_REMOVE(thread); TAILQ_INSERT_HEAD(&free_threadq, thread, tle); free_thread_count++; - KSE_LOCK_RELEASE(curkse, &thread_lock); + KSE_LOCK_RELEASE(curthread->kse, &thread_lock); _kse_critical_leave(crit); } } Index: head/lib/libkse/thread/thr_priority_queue.c =================================================================== --- head/lib/libkse/thread/thr_priority_queue.c (revision 113660) +++ head/lib/libkse/thread/thr_priority_queue.c (revision 113661) @@ -1,265 +1,272 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "thr_private.h" /* Prototypes: */ static void pq_insert_prio_list(pq_queue_t *pq, int prio); #if defined(_PTHREADS_INVARIANTS) #define PQ_IN_SCHEDQ (THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ) #define PQ_SET_ACTIVE(pq) (pq)->pq_flags |= PQF_ACTIVE #define PQ_CLEAR_ACTIVE(pq) (pq)->pq_flags &= ~PQF_ACTIVE #define PQ_ASSERT_ACTIVE(pq, msg) do { \ if (((pq)->pq_flags & PQF_ACTIVE) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_INACTIVE(pq, msg) do { \ if (((pq)->pq_flags & PQF_ACTIVE) != 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_IN_WAITQ(thrd, msg) do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_IN_RUNQ(thrd, msg) do { \ if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \ if (((thrd)->flags & PQ_IN_SCHEDQ) != 0) \ PANIC(msg); \ } while (0) #else #define PQ_SET_ACTIVE(pq) #define PQ_CLEAR_ACTIVE(pq) #define PQ_ASSERT_ACTIVE(pq, msg) #define PQ_ASSERT_INACTIVE(pq, msg) #define PQ_ASSERT_IN_WAITQ(thrd, msg) #define PQ_ASSERT_IN_RUNQ(thrd, msg) #define PQ_ASSERT_NOT_QUEUED(thrd, msg) #endif int _pq_alloc(pq_queue_t *pq, int minprio, int maxprio) { int ret = 0; int prioslots = maxprio - minprio + 1; if (pq == NULL) ret = -1; /* Create the priority queue with (maxprio - minprio + 1) slots: */ else if ((pq->pq_lists = (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) ret = -1; else { /* Remember the queue size: */ pq->pq_size = prioslots; ret = _pq_init(pq); } return (ret); } +void +_pq_free(pq_queue_t *pq) +{ + if ((pq != NULL) && (pq->pq_lists != NULL)) + free(pq->pq_lists); +} + int _pq_init(pq_queue_t *pq) { int i, ret = 0; if ((pq == NULL) || (pq->pq_lists == NULL)) ret = -1; else { /* Initialize the queue for each priority slot: */ for (i = 0; i < pq->pq_size; i++) { TAILQ_INIT(&pq->pq_lists[i].pl_head); pq->pq_lists[i].pl_prio = i; pq->pq_lists[i].pl_queued = 0; } /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); pq->pq_flags = 0; } return (ret); } void _pq_remove(pq_queue_t *pq, pthread_t pthread) { int prio = pthread->active_priority; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue"); /* * Remove this thread from priority list. Note that if * the priority list becomes empty, it is not removed * from the priority queue because another thread may be * added to the priority list (resulting in a needless * removal/insertion). Priority lists are only removed * from the priority queue when _pq_first is called. 
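 *
 * An illustrative sketch of the lazy list removal described above, not a
 * required calling sequence (pq and td are hypothetical; only the queue
 * functions declared in this file are used):
 *
 *	_pq_insert_tail(pq, td);	(td's priority list becomes queued)
 *	_pq_remove(pq, td);		(the list is now empty but stays queued)
 *	td = _pq_first(pq);		(returns NULL here; the empty list is
 *					 dequeued and its pl_queued flag cleared)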
*/ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); /* This thread is now longer in the priority queue. */ pthread->flags &= ~THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } void _pq_insert_head(pq_queue_t *pq, pthread_t pthread) { int prio; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_NOT_QUEUED(pthread, "_pq_insert_head: Already in priority queue"); prio = pthread->active_priority; TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } void _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) { int prio; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_NOT_QUEUED(pthread, "_pq_insert_tail: Already in priority queue"); prio = pthread->active_priority; TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } pthread_t _pq_first(pq_queue_t *pq) { pq_list_t *pql; pthread_t pthread = NULL; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active"); PQ_SET_ACTIVE(pq); while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && (pthread == NULL)) { if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { /* * The priority list is empty; remove the list * from the queue. */ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); /* Mark the list as not being in the queue: */ pql->pl_queued = 0; } } PQ_CLEAR_ACTIVE(pq); return (pthread); } static void pq_insert_prio_list(pq_queue_t *pq, int prio) { pq_list_t *pql; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active"); /* * The priority queue is in descending priority order. Start at * the beginning of the queue and find the list before which the * new list should be inserted. */ pql = TAILQ_FIRST(&pq->pq_queue); while ((pql != NULL) && (pql->pl_prio > prio)) pql = TAILQ_NEXT(pql, pl_link); /* Insert the list: */ if (pql == NULL) TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); else TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); /* Mark this list as being in the queue: */ pq->pq_lists[prio].pl_queued = 1; } Index: head/lib/libkse/thread/thr_private.h =================================================================== --- head/lib/libkse/thread/thr_private.h (revision 113660) +++ head/lib/libkse/thread/thr_private.h (revision 113661) @@ -1,1149 +1,1174 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ksd.h" #include "lock.h" #include "pthread_md.h" /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #define SCLASS_PRESET(x...) = x #else #define SCLASS extern #define SCLASS_PRESET(x...) #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thr_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define DBG_MUTEX 0x0001 #define DBG_SIG 0x0002 #define THR_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) /* * State change macro without scheduling queue change: */ #define THR_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * Define the signals to be used for scheduling. */ #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec > 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. */ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; } pq_queue_t; /* * Each KSEG has a scheduling queue. 
For now, threads that exist in their * own KSEG (system scope) will get a full priority queue. In the future * this can be optimized for the single thread per KSEG case. */ struct sched_queue { pq_queue_t sq_runq; TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ - TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */ }; /* Used to maintain pending and active signals: */ struct sigstatus { siginfo_t *info; /* arg 2 to signal handler */ int pending; /* Is this a pending signal? */ int blocked; /* * This signal has occured and hasn't * yet been handled; ignore subsequent * signals until the handler is done. */ int signo; }; typedef struct kse_thr_mailbox *kse_critical_t; struct kse_group; #define MAX_KSE_LOCKLEVEL 3 struct kse { struct kse_mailbox k_mbx; /* kernel kse mailbox */ /* -- location and order specific items for gdb -- */ struct pthread *k_curthread; /* current thread */ struct kse_group *k_kseg; /* parent KSEG */ struct sched_queue *k_schedq; /* scheduling queue */ /* -- end of location and order specific items -- */ - TAILQ_ENTRY(kse) k_qe; /* link entry */ + TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ + TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ struct ksd k_ksd; /* KSE specific data */ /* * Items that are only modified by the kse, or that otherwise * don't need to be locked when accessed */ struct lock k_lock; struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; int k_locklevel; sigset_t k_sigmask; struct sigstatus k_sigq[NSIG]; int k_check_sigq; long k_resched; /* scheduling signal arrived */ int k_flags; #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ int k_cpu; /* CPU ID when bound */ int k_done; /* this KSE is done */ }; /* * Each KSE group contains one or more KSEs in which threads can run. * At least for now, there is one scheduling queue per KSE group; KSEs * within the same KSE group compete for threads from the same scheduling * queue. A scope system thread has one KSE in one KSE group; the group * does not use its scheduling queue. */ struct kse_group { TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ struct sched_queue kg_schedq; /* scheduling queue */ struct lock kg_lock; int kg_threadcount; /* # of assigned threads */ int kg_idle_kses; int kg_flags; #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ }; /* + * Add/remove threads from a KSE's scheduling queue. + * For now the scheduling queue is hung off the KSEG. + */ +#define KSEG_THRQ_ADD(kseg, thr) \ +do { \ + TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ + (kseg)->kg_threadcount++; \ +} while (0) + +#define KSEG_THRQ_REMOVE(kseg, thr) \ +do { \ + TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ + (kseg)->kg_threadcount--; \ +} while (0) + + +/* * Lock acquire and release for KSEs. */ #define KSE_LOCK_ACQUIRE(kse, lck) \ do { \ if ((kse)->k_locklevel >= MAX_KSE_LOCKLEVEL) \ PANIC("Exceeded maximum lock level"); \ else { \ (kse)->k_locklevel++; \ _lock_acquire((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ } \ } while (0) #define KSE_LOCK_RELEASE(kse, lck) \ do { \ if ((kse)->k_locklevel > 0) { \ _lock_release((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ (kse)->k_locklevel--; \ } \ } while (0) /* * Lock our own KSEG. 
*/ #define KSE_LOCK(curkse) \ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) #define KSE_UNLOCK(curkse) \ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) /* * Lock a potentially different KSEG. */ #define KSE_SCHED_LOCK(curkse, kseg) \ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) #define KSE_SCHED_UNLOCK(curkse, kseg) \ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) /* * Waiting queue manipulation macros (using pqe link): */ #define KSE_WAITQ_REMOVE(kse, thrd) \ do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ } \ } while (0) #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) #define KSE_SET_WAIT(kse) \ atomic_store_rel_int(&(kse)->k_mbx.km_flags, 1) #define KSE_CLEAR_WAIT(kse) \ atomic_set_acq_int(&(kse)->k_mbx.km_flags, 0) #define KSE_WAITING(kse) (kse)->k_mbx.km_flags != 0 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * lock initialization values. */ #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } struct pthread_mutex { /* * Lock for accesses to this structure. */ struct lock m_lock; enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; long m_flags; int m_count; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { /* * Lock for accesses to this structure. */ struct lock c_lock; enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; struct pthread_mutex *c_mutex; long c_flags; long c_seqno; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ NULL, NULL, 0, 0 } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK_DEFAULT 65536 /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK_INITIAL 0x100000 /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define THR_DEFAULT_PRIORITY 15 #define THR_MIN_PRIORITY 0 #define THR_MAX_PRIORITY 31 /* 0x1F */ #define THR_SIGNAL_PRIORITY 32 /* 0x20 */ #define THR_RT_PRIORITY 64 /* 0x40 */ #define THR_FIRST_PRIORITY THR_MIN_PRIORITY #define THR_LAST_PRIORITY \ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * XXX - Define a thread-safe macro to get the current time of day * which is updated at regular intervals by something. * * For now, we just make the system call to get the time. */ #define KSE_GET_TOD(curkse, tsp) \ do { \ *tsp = (curkse)->k_mbx.km_timeofday; \ if ((tsp)->tv_sec == 0) \ clock_gettime(CLOCK_REALTIME, tsp); \ } while (0) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_LOCKWAIT, PS_MUTEX_WAIT, PS_COND_WAIT, PS_SLEEP_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct lock *lock; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); /* * This stores a thread's state prior to running a signal handler. * It is used when a signal is delivered to a thread blocked in * userland. If the signal handler returns normally, the thread's * state is restored from here. 
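 *
 * An illustrative sketch only, not the actual save path; the explicit
 * stores below are assumptions, while _thr_sigframe_restore() is declared
 * later in this header:
 *
 *	struct pthread_sigframe psf;
 *
 *	psf.psf_state = thread->state;
 *	psf.psf_wait_data = thread->data;
 *	psf.psf_wakeup_time = thread->wakeup_time;
 *	(run the signal handler)
 *	_thr_sigframe_restore(thread, &psf);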
*/ struct pthread_sigframe { int psf_flags; int psf_interrupted; int psf_signo; enum pthread_state psf_state; union pthread_wait_data psf_wait_data; struct timespec psf_wakeup_time; sigset_t psf_sigset; sigset_t psf_sigmask; int psf_seqno; }; struct join_status { struct pthread *thread; void *ret; int error; }; struct pthread_specific_elem { const void *data; int seqno; }; #define MAX_THR_LOCKLEVEL 3 /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ /* Queue entry for GC lists: */ TAILQ_ENTRY(pthread) gcle; /* * Lock for accesses to this thread structure. */ struct lock lock; struct lockuser lockusers[MAX_THR_LOCKLEVEL]; int locklevel; kse_critical_t critical[MAX_KSE_LOCKLEVEL]; struct kse *kse; struct kse_group *kseg; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; /* * Thread mailbox. */ struct kse_thr_mailbox tmbx; int active; /* thread running */ int blocked; /* thread blocked in kernel */ int need_switchout; int need_wakeup; /* * Used for tracking delivery of signal handlers. */ struct pthread_sigframe *curframe; siginfo_t siginfo[NSIG]; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define THR_AT_CANCEL_POINT 0x0004 #define THR_CANCELLING 0x0008 #define THR_CANCEL_NEEDED 0x0010 int cancelflags; thread_continuation_t continuation; /* * The thread's base and pending signal masks. The active * signal mask is stored in the thread's context (in mailbox). */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; int refcount; /* Thread state: */ enum pthread_state state; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex or condition variable queue when it is not * being called from a signal handler. If a thread is a member * of one of these queues when a signal handler is invoked, it * must be removed from the queue before invoking the handler * and then added back to the queue after return from the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. 
*/ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ /* Wait data. */ union pthread_wait_data data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* * Set to TRUE if this thread should yield after leaving a * critical region to check for signals, messages, etc. */ int critical_yield; int sflags; #define THR_FLAGS_IN_SYNCQ 0x0001 /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ #define THR_FLAGS_GC_SAFE 0x0020 /* thread safe for cleaning */ #define THR_FLAGS_IN_TDLIST 0x0040 /* thread in all thread list */ #define THR_FLAGS_IN_GCLIST 0x0080 /* thread in gc list */ /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Critical regions can also be detected by looking at the threads * current lock level. Ensure these macros increment and decrement * the lock levels such that locks can not be held with a lock level * of 0. 
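 *
 * An illustrative sketch of the invariant, not a prescribed sequence
 * (thr and lck are hypothetical; the macros are defined below):
 *
 *	THR_LOCK_ACQUIRE(thr, lck);	(locklevel goes from 0 to 1, so
 *					 THR_IN_CRITICAL(thr) is now true)
 *	THR_LOCK_RELEASE(thr, lck);	(locklevel drops back to 0; pending
 *					 yields and signals are then checked)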
*/ #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_YIELD_CHECK(thrd) \ do { \ if (((thrd)->critical_yield != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sched_switch(thrd); \ else if (((thrd)->check_pending != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sig_check_pending(thrd); \ } while (0) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \ PANIC("Exceeded maximum lock level"); \ else { \ (thrd)->locklevel++; \ _lock_acquire((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1], \ (thrd)->active_priority); \ } \ } while (0) #define THR_LOCK_RELEASE(thrd, lck) \ do { \ if ((thrd)->locklevel > 0) { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ if ((thrd)->locklevel != 0) \ ; \ else if ((thrd)->critical_yield != 0) \ _thr_sched_switch(thrd); \ else if ((thrd)->check_pending != 0) \ _thr_sig_check_pending(thrd); \ } \ } while (0) /* * For now, threads will have their own lock separate from their * KSE scheduling lock. */ #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) /* * Priority queue manipulation macros (using pqe link). We use * the thread's kseg link instead of the kse link because a thread * does not (currently) have a statically assigned kse. */ #define THR_RUNQ_INSERT_HEAD(thrd) \ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_INSERT_TAIL(thrd) \ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_REMOVE(thrd) \ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_FIRST() \ _pq_first(&(thrd)->kseg->kg_schedq.sq_runq) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ (thrd)->flags |= THR_FLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ (thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \ - TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \ + TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->flags |= THR_FLAGS_IN_GCLIST; \ + _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \ - TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \ + TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \ + _gc_count--; \ } \ } while (0) +#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) + /* * Locking the scheduling queue for another thread uses that thread's * KSEG lock. 
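 *
 * An illustrative sketch mirroring the pattern used by callers such as
 * thr_resume_np.c and thr_suspend_np.c (curthread and thread are
 * hypothetical struct pthread pointers):
 *
 *	THR_SCHED_LOCK(curthread, thread);	(locks thread->kseg->kg_lock)
 *	(examine or requeue the target thread)
 *	THR_SCHED_UNLOCK(curthread, thread);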
*/ #define THR_SCHED_LOCK(curthr, thr) do { \ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ (curthr)->locklevel++; \ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ } while (0) #define THR_SCHED_UNLOCK(curthr, thr) do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ if ((curthr)->locklevel == 0) \ THR_YIELD_CHECK(curthr); \ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ #define THR_CRITICAL_LEAVE(thr) do { \ (thr)->critical_count--; \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ _thr_sched_switch(thr); \ } \ } while (0) #define THR_IS_ACTIVE(thrd) \ ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)) #define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0) /* * Global variables for the pthread kernel. */ SCLASS void *_usrstack SCLASS_PRESET(NULL); SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); /* List of threads needing GC: */ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); /* Default thread attributes: */ SCLASS struct pthread_attr _pthread_attr_default SCLASS_PRESET({ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, THR_STACK_DEFAULT }); /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr _pthread_mutexattr_default SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr _pthread_condattr_default SCLASS_PRESET({COND_TYPE_FAST, 0}); /* Clock resolution in usec. */ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); /* Array of signal actions for this process: */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there * is a thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Lock for above count of dummy handlers and for the process signal * mask and pending signal sets. */ SCLASS struct lock _thread_signal_lock; /* Pending signals and mask for this process: */ SCLASS sigset_t _thr_proc_sigpending; SCLASS sigset_t _thr_proc_sigmask SCLASS_PRESET({{0, 0, 0, 0}}); SCLASS siginfo_t _thr_proc_siginfo[NSIG]; SCLASS pid_t _thr_pid SCLASS_PRESET(0); /* Garbage collector lock. */ SCLASS struct lock _gc_lock; SCLASS int _gc_check SCLASS_PRESET(0); -SCLASS pthread_t _gc_thread; +SCLASS int _gc_count SCLASS_PRESET(0); SCLASS struct lock _mutex_static_lock; SCLASS struct lock _rwlock_static_lock; SCLASS struct lock _keytable_lock; SCLASS struct lock _thread_list_lock; SCLASS int _thr_guard_default; SCLASS int _thr_page_size; SCLASS int _thr_debug_flags SCLASS_PRESET(0); /* Undefine the storage class and preset specifiers: */ #undef SCLASS #undef SCLASS_PRESET /* * Function prototype definitions. 
*/ __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); void _cond_wait_backout(struct pthread *); struct pthread *_get_curthread(void); struct kse *_get_curkse(void); void _set_curkse(struct kse *); -struct kse *_kse_alloc(struct kse *); +struct kse *_kse_alloc(struct pthread *); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); -void _kse_free(struct kse *, struct kse *); +void _kse_free(struct pthread *, struct kse *); void _kse_init(); -struct kse_group *_kseg_alloc(struct kse *); +struct kse_group *_kseg_alloc(struct pthread *); void _kse_lock_wait(struct lock *, struct lockuser *lu); void _kse_lock_wakeup(struct lock *, struct lockuser *lu); void _kse_sig_check_pending(struct kse *); void _kse_single_thread(struct pthread *); void _kse_start(struct kse *); void _kse_setthreaded(int); int _kse_isthreaded(void); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(struct pthread *); void _mutex_notify_priochange(struct pthread *, struct pthread *, int); int _mutex_reinit(struct pthread_mutex *); void _mutex_unlock_private(struct pthread *); void _libpthread_init(struct pthread *); int _pq_alloc(struct pq_queue *, int, int); +void _pq_free(struct pq_queue *); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); struct pthread *_pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); -struct pthread *_thr_alloc(struct kse *); +struct pthread *_thr_alloc(struct pthread *); +int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); +int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); void _thr_exit(char *, int, char *); void _thr_exit_cleanup(void); void _thr_lock_wait(struct lock *lock, struct lockuser *lu); void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); int _thr_ref_add(struct pthread *, struct pthread *, int); void _thr_ref_delete(struct pthread *, struct pthread *); void _thr_schedule_add(struct pthread *, struct pthread *); void _thr_schedule_remove(struct pthread *, struct pthread *); void _thr_setrunnable(struct pthread *curthread, struct pthread *thread); void _thr_setrunnable_unlocked(struct pthread *thread); void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *); void _thr_sig_dispatch(struct kse *, int, siginfo_t *); int _thr_stack_alloc(struct pthread_attr *); void _thr_stack_free(struct pthread_attr *); void _thr_exit_cleanup(void); -void _thr_free(struct kse *, struct pthread *); +void _thr_free(struct pthread *, struct pthread *); +void _thr_gc(struct pthread *); void _thr_panic_exit(char *, int, char *); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_printf(int, const 
char *, ...); void _thr_sched_frame(struct pthread_sigframe *); void _thr_sched_switch(struct pthread *); void _thr_set_timeout(const struct timespec *); void _thr_sig_handler(int, siginfo_t *, ucontext_t *); void _thr_sig_check_pending(struct pthread *); void _thr_sig_rundown(struct pthread *, ucontext_t *, struct pthread_sigframe *); void _thr_sig_send(struct pthread *pthread, int sig); void _thr_sig_wrapper(void); void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); void _thr_seterrno(struct pthread *, int); void _thr_enter_cancellation_point(struct pthread *); void _thr_leave_cancellation_point(struct pthread *); /* XXX - Stuff that goes away when my sources get more up to date. */ /* #include */ #ifdef SYS_KSE_H int __sys_kse_create(struct kse_mailbox *, int); int __sys_kse_thr_wakeup(struct kse_mailbox *); int __sys_kse_exit(struct kse_mailbox *); int __sys_kse_release(struct kse_mailbox *); #endif /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #inclde */ #ifdef _SCHED_H_ int __sys_sched_yield(void); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fork(void); int __sys_fsync(int); pid_t __sys_getpid(void); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif #endif /* !_THR_PRIVATE_H */ Index: head/lib/libkse/thread/thr_resume_np.c =================================================================== --- head/lib/libkse/thread/thr_resume_np.c (revision 113660) +++ head/lib/libkse/thread/thr_resume_np.c (revision 113661) @@ -1,109 +1,112 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" static void resume_common(struct pthread *); __weak_reference(_pthread_resume_np, pthread_resume_np); __weak_reference(_pthread_resume_all_np, pthread_resume_all_np); /* Resume a thread: */ int _pthread_resume_np(pthread_t thread) { struct pthread *curthread = _get_curthread(); int ret; /* Add a reference to the thread: */ if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) { /* Is it currently suspended? */ if ((thread->flags & THR_FLAGS_SUSPENDED) != 0) { /* Lock the threads scheduling queue: */ THR_SCHED_LOCK(curthread, thread); - resume_common(thread); + if ((curthread->state != PS_DEAD) && + (curthread->state != PS_DEADLOCK) && + ((curthread->flags & THR_FLAGS_EXITING) != 0)) + resume_common(thread); /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, thread); } _thr_ref_delete(curthread, thread); } return (ret); } void _pthread_resume_all_np(void) { struct pthread *curthread = _get_curthread(); struct pthread *thread; kse_critical_t crit; /* Take the thread list lock: */ crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(thread, &_thread_list, tle) { if ((thread != curthread) && ((thread->flags & THR_FLAGS_SUSPENDED) != 0) && (thread->state != PS_DEAD) && (thread->state != PS_DEADLOCK) && ((thread->flags & THR_FLAGS_EXITING) == 0)) { THR_SCHED_LOCK(curthread, thread); resume_common(thread); THR_SCHED_UNLOCK(curthread, thread); } } /* Release the thread list lock: */ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } static void resume_common(struct pthread *thread) { /* Clear the suspend flag: */ thread->flags &= ~THR_FLAGS_SUSPENDED; /* * If the thread's state is suspended, that means it is * now runnable but not in any scheduling queue. Set the * state to running and insert it into the run queue. */ if (thread->state == PS_SUSPENDED) _thr_setrunnable_unlocked(thread); } Index: head/lib/libkse/thread/thr_setschedparam.c =================================================================== --- head/lib/libkse/thread/thr_setschedparam.c (revision 113660) +++ head/lib/libkse/thread/thr_setschedparam.c (revision 113661) @@ -1,129 +1,136 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include "thr_private.h" __weak_reference(_pthread_setschedparam, pthread_setschedparam); int _pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param) { struct pthread *curthread = _get_curthread(); int in_syncq; int in_readyq = 0; int old_prio; int ret = 0; if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) { /* Return an invalid argument error: */ ret = EINVAL; } else if ((param->sched_priority < THR_MIN_PRIORITY) || (param->sched_priority > THR_MAX_PRIORITY)) { /* Return an unsupported value error. */ ret = ENOTSUP; /* Find the thread in the list of active threads: */ } else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) { /* * Lock the threads scheduling queue while we change * its priority: */ THR_SCHED_LOCK(curthread, pthread); + if ((pthread->state == PS_DEAD) || + (pthread->state == PS_DEADLOCK) || + ((pthread->flags & THR_FLAGS_EXITING) != 0)) { + THR_SCHED_UNLOCK(curthread, pthread); + _thr_ref_delete(curthread, pthread); + return (ESRCH); + } in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ; /* Set the scheduling policy: */ pthread->attr.sched_policy = policy; if (param->sched_priority == THR_BASE_PRIORITY(pthread->base_priority)) /* * There is nothing to do; unlock the threads * scheduling queue. 
*/ THR_SCHED_UNLOCK(curthread, pthread); else { /* * Remove the thread from its current priority * queue before any adjustments are made to its * active priority: */ old_prio = pthread->active_priority; if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) { in_readyq = 1; THR_RUNQ_REMOVE(pthread); } /* Set the thread base priority: */ pthread->base_priority &= (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY); pthread->base_priority = param->sched_priority; /* Recalculate the active priority: */ pthread->active_priority = MAX(pthread->base_priority, pthread->inherited_priority); if (in_readyq) { if ((pthread->priority_mutex_count > 0) && (old_prio > pthread->active_priority)) { /* * POSIX states that if the priority is * being lowered, the thread must be * inserted at the head of the queue for * its priority if it owns any priority * protection or inheritence mutexes. */ THR_RUNQ_INSERT_HEAD(pthread); } else THR_RUNQ_INSERT_TAIL(pthread); } /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, pthread); /* * Check for any mutex priority adjustments. This * includes checking for a priority mutex on which * this thread is waiting. */ _mutex_notify_priochange(curthread, pthread, in_syncq); } _thr_ref_delete(curthread, pthread); } return (ret); } Index: head/lib/libkse/thread/thr_suspend_np.c =================================================================== --- head/lib/libkse/thread/thr_suspend_np.c (revision 113660) +++ head/lib/libkse/thread/thr_suspend_np.c (revision 113661) @@ -1,106 +1,105 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include "thr_private.h" static void suspend_common(struct pthread *thread); __weak_reference(_pthread_suspend_np, pthread_suspend_np); __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np); /* Suspend a thread: */ int _pthread_suspend_np(pthread_t thread) { struct pthread *curthread = _get_curthread(); int ret; /* Suspending the current thread doesn't make sense. */ if (thread == _get_curthread()) ret = EDEADLK; /* Add a reference to the thread: */ else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) { /* Lock the threads scheduling queue: */ THR_SCHED_LOCK(curthread, thread); - suspend_common(thread); - /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, thread); /* Don't forget to remove the reference: */ _thr_ref_delete(curthread, thread); } return (ret); } void _pthread_suspend_all_np(void) { struct pthread *curthread = _get_curthread(); struct pthread *thread; kse_critical_t crit; /* Take the thread list lock: */ crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(thread, &_thread_list, tle) { - if ((thread != curthread) && - (thread->state != PS_DEAD) && - (thread->state != PS_DEADLOCK) && - ((thread->flags & THR_FLAGS_EXITING) == 0)) { + if (thread != curthread) { THR_SCHED_LOCK(curthread, thread); suspend_common(thread); THR_SCHED_UNLOCK(curthread, thread); } } /* Release the thread list lock: */ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } void suspend_common(struct pthread *thread) { - thread->flags |= THR_FLAGS_SUSPENDED; - if (thread->flags & THR_FLAGS_IN_RUNQ) { - THR_RUNQ_REMOVE(thread); - THR_SET_STATE(thread, PS_SUSPENDED); + if ((thread->state != PS_DEAD) && + (thread->state != PS_DEADLOCK) && + ((thread->flags & THR_FLAGS_EXITING) == 0)) { + thread->flags |= THR_FLAGS_SUSPENDED; + if ((thread->flags & THR_FLAGS_IN_RUNQ) != 0) { + THR_RUNQ_REMOVE(thread); + THR_SET_STATE(thread, PS_SUSPENDED); + } } } Index: head/lib/libpthread/Makefile =================================================================== --- head/lib/libpthread/Makefile (revision 113660) +++ head/lib/libpthread/Makefile (revision 113661) @@ -1,32 +1,32 @@ # $FreeBSD$ # # All library objects contain FreeBSD revision strings by default; they may be # excluded as a space-saving measure. To produce a library that does # not contain these strings, add -DSTRIP_FBSDID (see ) to CFLAGS # below. Note, there are no IDs for syscall stubs whose sources are generated. # To included legacy CSRG sccsid strings, add -DLIBC_SCCS and -DSYSLIBC_SCCS # (for system call stubs) to CFLAGS below. -DSYSLIBC_SCCS affects just the # system call stubs. LIB=kse SHLIB_MAJOR= 1 CFLAGS+=-DPTHREAD_KERNEL CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \ -I${.CURDIR}/../../include CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include CFLAGS+=-I${.CURDIR}/sys # Uncomment this if you want libpthread to contain debug information for # thread locking. 
#CFLAGS+=-D_LOCK_DEBUG -g # enable extra internal consistancy checks CFLAGS+=-D_PTHREADS_INVARIANTS -Wall AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread PRECIOUSLIB= yes -.include "${.CURDIR}/man/Makefile.inc" +#.include "${.CURDIR}/man/Makefile.inc" .include "${.CURDIR}/thread/Makefile.inc" .include "${.CURDIR}/sys/Makefile.inc" .include Index: head/lib/libpthread/arch/i386/i386/ksd.c =================================================================== --- head/lib/libpthread/arch/i386/i386/ksd.c (revision 113660) +++ head/lib/libpthread/arch/i386/i386/ksd.c (revision 113661) @@ -1,166 +1,166 @@ /*- * Copyright (C) 2003 David Xu - * Copyright (c) 2001 Daniel Eischen + * Copyright (c) 2001,2003 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "ksd.h" #define LDT_ENTRIES 8192 #define LDT_WORDS (8192/sizeof(unsigned int)) #define LDT_RESERVED NLDT static unsigned int ldt_mask[LDT_WORDS]; static int initialized = 0; void initialize(void) { int i, j; memset(ldt_mask, 0xFF, sizeof(ldt_mask)); /* Reserve system predefined LDT entries */ for (i = 0; i < LDT_RESERVED; ++i) { j = i / 32; ldt_mask[j] &= ~(1 << (i % 32)); } initialized = 1; } static u_int alloc_ldt_entry(void) { u_int i, j, index; index = 0; for (i = 0; i < LDT_WORDS; ++i) { if (ldt_mask[i] != 0) { j = bsfl(ldt_mask[i]); ldt_mask[i] &= ~(1 << j); index = i * 32 + j; break; } } return (index); } static void free_ldt_entry(u_int index) { u_int i, j; if (index < LDT_RESERVED || index >= LDT_ENTRIES) return; i = index / 32; j = index % 32; ldt_mask[i] |= (1 << j); } /* * Initialize KSD. This also includes setting up the LDT. 
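 *
 * An illustrative sketch of the caller's side, mirroring the call made
 * from _kse_alloc() in thr_kern.c (kse is a hypothetical struct kse *;
 * the follow-up _ksd_setprivate() call is an assumption):
 *
 *	if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0)
 *		(fail: no free LDT slot, or i386_set_ldt() rejected the entry)
 *	_ksd_setprivate(&kse->k_ksd);	(point %gs at this KSE's data)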
*/ int _ksd_create(struct ksd *ksd, void *base, int size) { union descriptor ldt; if (initialized == 0) initialize(); ksd->ldt = alloc_ldt_entry(); if (ksd->ldt == 0) return (-1); ksd->base = base; ksd->size = size; ldt.sd.sd_hibase = (unsigned int)ksd->base >> 24; ldt.sd.sd_lobase = (unsigned int)ksd->base & 0xFFFFFF; ldt.sd.sd_hilimit = (size >> 16) & 0xF; ldt.sd.sd_lolimit = ksd->size & 0xFFFF; ldt.sd.sd_type = SDT_MEMRWA; ldt.sd.sd_dpl = SEL_UPL; ldt.sd.sd_p = 1; ldt.sd.sd_xx = 0; ldt.sd.sd_def32 = 1; ldt.sd.sd_gran = 0; /* no more than 1M */ if (i386_set_ldt(ksd->ldt, &ldt, 1) < 0) { free_ldt_entry(ksd->ldt); return (-1); } ksd->flags = KSDF_INITIALIZED; return (0); } void _ksd_destroy(struct ksd *ksd) { if ((ksd->flags & KSDF_INITIALIZED) != 0) { free_ldt_entry(ksd->ldt); } } int _ksd_getprivate(struct ksd *ksd, void **base, int *size) { if ((ksd == NULL) || ((ksd->flags & KSDF_INITIALIZED) == 0)) return (-1); else { *base = ksd->base; *size = ksd->size; return (0); } } /* * This assumes that the LDT is already setup. Just set %gs to * reference it. */ int _ksd_setprivate(struct ksd *ksd) { int val; int ret; if ((ksd->flags & KSDF_INITIALIZED) == 0) ret = -1; else { val = (ksd->ldt << 3) | 7; __asm __volatile("movl %0, %%gs" : : "r" (val)); ret = 0; } return (ret); } Index: head/lib/libpthread/arch/i386/i386/thr_getcontext.S =================================================================== --- head/lib/libpthread/arch/i386/i386/thr_getcontext.S (revision 113660) +++ head/lib/libpthread/arch/i386/i386/thr_getcontext.S (revision 113661) @@ -1,157 +1,157 @@ -/* +/*- * Copyright (c) 2001 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Where do we define these? 
*/ #define UC_MC_OFFSET 16 /* offset to mcontext from ucontext */ #define MC_LEN_OFFSET 80 /* offset to mc_len from mcontext */ #define MC_LEN 640 /* mc_len */ #define MC_FPFMT_OFFSET 84 #define MC_FPFMT_NODEV 0x10000 #define MC_FPFMT_387 0x10001 #define MC_FPFMT_XMM 0x10002 #define MC_OWNEDFP_OFFSET 88 #define MC_OWNEDFP_NONE 0x20000 #define MC_OWNEDFP_FPU 0x20001 #define MC_OWNEDFP_PCB 0x20002 #define MC_FPREGS_OFFSET 96 /* offset to FP regs from mcontext */ #define MC_FP_CW_OFFSET 96 /* offset to FP control word */ /* * int thr_setcontext(ucontext_t *ucp) * * Restores the context in ucp. * * Returns 0 if there are no errors; -1 otherwise */ .weak CNAME(_thr_setcontext) .set CNAME(_thr_setcontext),CNAME(__thr_setcontext) ENTRY(__thr_setcontext) movl 4(%esp), %eax /* get address of context and sigset */ cmpl $0, %eax /* check for null pointer */ jne 1f movl $-1, %eax jmp 7f 1: addl $UC_MC_OFFSET, %eax /* add offset to mcontext */ cmpl $MC_LEN, MC_LEN_OFFSET(%eax) /* is context valid? */ je 2f movl $-1, %eax /* bzzzt, invalid context */ jmp 7f /*2: movl 4(%edx), %gs*/ /* we don't touch %gs */ 2: movl 8(%edx), %fs movl 12(%edx), %es movl 16(%edx), %ds movl 76(%edx), %ss movl 20(%edx), %edi movl 24(%edx), %esi movl 28(%edx), %ebp movl %esp, %ecx /* save current stack in ecx */ movl 72(%edx), %esp /* switch to context defined stack */ movl 60(%edx), %eax /* put return address at top of stack */ pushl %eax movl 44(%edx), %eax /* get ecx from context, */ pushl %eax /* push on top of stack */ movl 48(%edx), %eax /* get eax from context, */ pushl %eax /* push on top of stack */ /* * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB) { * if (mc_fpformat == MC_FPFMT_387) * restore 387 FP register format * else if (mc_fpformat == MC_FPFMT_XMM) * restore XMM/SSE FP register format * } */ cmpl $MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%edx) je 3f cmpl $MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%edx) jne 5f 3: cmpl $MC_FPFMT_387, MC_FPFMT_OFFSET(%edx) jne 5f frstor MC_FPREGS_OFFSET(%edx) /* restore 387 FP regs */ jmp 5f 4: cmpl $MC_FPFMT_XMM, MC_FPFMT_OFFSET(%edx) jne 5f fxrstor MC_FPREGS_OFFSET(%edx) /* restore XMM FP regs */ jmp 6f 5: fninit fldcw MC_FP_CW_OFFSET(%edx) 6: pushl 68(%edx) /* restore flags register */ popf movl 36(%edx), %ebx /* restore ebx and edx */ movl 40(%edx), %edx popl %eax /* restore eax and ecx last */ popl %ecx 7: ret /* * int thr_getcontext(ucontext_t *ucp); * * Returns 0 if there are no errors; -1 otherwise */ .weak CNAME(_thr_getcontext) .set CNAME(_thr_getcontext),CNAME(__thr_getcontext) ENTRY(__thr_getcontext) movl 4(%esp), %eax /* get address of context */ cmpl $0, %eax /* check for null pointer */ jne 1f movl $-1, %eax jmp 2f movl 4(%esp), %eax /* get address of context and sigset */ 1: pushl %edx /* save value of edx */ movl 8(%esp), %edx /* get address of context */ addl $UC_MC_OFFSET, %edx /* add offset to mcontext */ /*movl %gs, 4(%edx)*/ /* we don't touch %gs */ movl %fs, 8(%edx) movl %es, 12(%edx) movl %ds, 16(%edx) movl %ss, 76(%edx) movl %edi, 20(%edx) movl %esi, 24(%edx) movl %ebp, 28(%edx) movl %ebx, 36(%edx) movl $0, 48(%edx) /* store successful return in eax */ popl %eax /* get saved value of edx */ movl %eax, 40(%edx) /* save edx */ movl %ecx, 44(%edx) movl (%esp), %eax /* get return address */ movl %eax, 60(%edx) /* save return address */ fnstcw MC_FP_CW_OFFSET(%edx) movl $MC_LEN, MC_LEN_OFFSET(%edx) movl $MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%edx) /* no FP */ movl $MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%edx) /* no FP */ pushfl popl %eax /* get eflags */ movl 
%eax, 68(%edx) /* store eflags */ movl %esp, %eax /* setcontext pushes the return */ addl $4, %eax /* address onto the top of the */ movl %eax, 72(%edx) /* stack; account for this */ movl 40(%edx), %edx /* restore edx -- is this needed? */ xorl %eax, %eax /* return 0 */ 2: ret Index: head/lib/libpthread/arch/i386/include/atomic_ops.h =================================================================== --- head/lib/libpthread/arch/i386/include/atomic_ops.h (revision 113660) +++ head/lib/libpthread/arch/i386/include/atomic_ops.h (revision 113661) @@ -1,51 +1,51 @@ -/* +/*- * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ATOMIC_OPS_H_ #define _ATOMIC_OPS_H_ /* * Atomic swap: * Atomic (tmp = *dst, *dst = val), then *res = tmp * * void atomic_swap_long(long *dst, long val, long *res); */ static inline void atomic_swap_long(long *dst, long val, long *res) { __asm __volatile( "xchgl %2, %1; movl %2, %0" : "=m" (*res) : "m" (*dst), "r" (val) : "memory"); } #define atomic_swap_int(d, v, r) \ atomic_swap_long((long *)(d), (long)(v), (long *)(r)) #define atomic_swap_ptr atomic_swap_int #endif Index: head/lib/libpthread/arch/i386/include/ksd.h =================================================================== --- head/lib/libpthread/arch/i386/include/ksd.h (revision 113660) +++ head/lib/libpthread/arch/i386/include/ksd.h (revision 113661) @@ -1,144 +1,144 @@ /*- * Copyright (C) 2003 David Xu * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. 
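The xchgl-based atomic_swap_long() in atomic_ops.h above is the only atomic primitive the lock code needs: each party swaps its own record into a shared pointer and gets the previous value back privately. A hedged sketch of that hand-off pattern (struct request, tail and enqueue are illustrative names, not library symbols):

#include "atomic_ops.h"

struct request {
	volatile long	locked;		/* 1 = busy, 0 = granted */
};

static struct request *tail;		/* shared: last request queued */

/*
 * Swap my request into the shared tail.  The previous tail is returned to
 * me alone, so I can spin on prev->locked without any further atomics;
 * this is essentially what _lock_acquire() does with l_head further down.
 */
static struct request *
enqueue(struct request *mine)
{
	struct request *prev;

	mine->locked = 1;
	atomic_swap_ptr(&tail, mine, &prev);
	return (prev);
}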
Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ */ #ifndef _KSD_H_ #define _KSD_H_ #include struct pthread; struct __ucontext; struct kse; /* * KSE Specific Data. */ struct ksd { int ldt; #define KSDF_INITIALIZED 0x01 long flags; void *base; long size; }; /* * Evaluates to the byte offset of the per-kse variable name. */ #define __ksd_offset(name) __offsetof(struct kse, name) /* * Evaluates to the type of the per-kse variable name. */ #define __ksd_type(name) __typeof(((struct kse *)0)->name) /* * Evaluates to the value of the per-kse variable name. */ #define __KSD_GET_PTR(name) ({ \ void *__result; \ \ u_int __i; \ __asm __volatile("movl %%gs:%1, %0" \ : "=r" (__i) \ : "m" (*(u_int *)(__ksd_offset(name)))); \ __result = (void *)__i; \ \ __result; \ }) /* * Evaluates to the value of the per-kse variable name. */ #define __KSD_GET32(name) ({ \ __ksd_type(name) __result; \ \ u_int __i; \ __asm __volatile("movl %%gs:%1, %0" \ : "=r" (__i) \ : "m" (*(u_int *)(__ksd_offset(name)))); \ __result = *(__ksd_type(name) *)&__i; \ \ __result; \ }) /* * Sets the value of the per-cpu variable name to value val. */ #define __KSD_SET32(name, val) ({ \ __ksd_type(name) __val = (val); \ \ u_int __i; \ __i = *(u_int *)&__val; \ __asm __volatile("movl %1,%%gs:%0" \ : "=m" (*(u_int *)(__ksd_offset(name))) \ : "r" (__i)); \ }) static __inline u_long __ksd_readandclear32(volatile u_long *addr) { u_long result; __asm __volatile ( " xorl %0, %0;" " xchgl %%gs:%1, %0;" "# __ksd_readandclear32" : "=&r" (result) : "m" (*addr)); return (result); } #define __KSD_READANDCLEAR32(name) ({ \ __ksd_type(name) __result; \ \ __result = (__ksd_type(name)) \ __ksd_readandclear32((u_long *)__ksd_offset(name)); \ __result; \ }) /* * All members of struct kse are prefixed with k_. 
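Each of the __KSD_* accessors above compiles down to a single %gs-relative load or store at a fixed offset inside struct kse; there is no function call and no global lookup on the fast path. As a hedged illustration, a register-indirect variant of the same load (the macros themselves use a direct memory operand) could be written as:

static inline unsigned int
gs_read32(unsigned int offset)
{
	unsigned int val;

	/* Fetch the 32-bit word at %gs:offset (i386, GCC inline asm). */
	__asm __volatile("movl %%gs:(%1), %0"
	    : "=r" (val)
	    : "r" (offset));
	return (val);
}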
*/ #define KSD_GET_PTR(member) __KSD_GET_PTR(k_ ## member) #define KSD_SET_PTR(member, val) __KSD_SET32(k_ ## member, val) #define KSD_READANDCLEAR_PTR(member) __KSD_READANDCLEAR32(k_ ## member) #define _ksd_curkse ((struct kse *)KSD_GET_PTR(mbx.km_udata)) #define _ksd_curthread KSD_GET_PTR(curthread) #define _ksd_set_tmbx(value) KSD_SET_PTR(mbx.km_curthread, (void *)value) #define _ksd_readandclear_tmbx KSD_READANDCLEAR_PTR(mbx.km_curthread) int _ksd_create(struct ksd *ksd, void *base, int size); void _ksd_destroy(struct ksd *ksd); int _ksd_getprivate(struct ksd *ksd, void **base, int *size); int _ksd_setprivate(struct ksd *ksd); #endif Index: head/lib/libpthread/arch/i386/include/pthread_md.h =================================================================== --- head/lib/libpthread/arch/i386/include/pthread_md.h (revision 113660) +++ head/lib/libpthread/arch/i386/include/pthread_md.h (revision 113661) @@ -1,54 +1,51 @@ /*- * Copyright (c) 2002 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine-dependent thread prototypes/definitions for the thread kernel. */ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ -#include #include #include -extern int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); -extern int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); extern int _thr_setcontext(ucontext_t *); extern int _thr_getcontext(ucontext_t *); /* * These are needed to ensure an application doesn't attempt to jump * between stacks of different threads. They return the stack of * jmp_buf, sigjmp_buf, and ucontext respectively. */ #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2])) #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2])) #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp)) #define THR_GETCONTEXT(ucp) _thr_getcontext(ucp) #define THR_SETCONTEXT(ucp) _thr_setcontext(ucp) #endif Index: head/lib/libpthread/sys/lock.c =================================================================== --- head/lib/libpthread/sys/lock.c (revision 113660) +++ head/lib/libpthread/sys/lock.c (revision 113661) @@ -1,268 +1,268 @@ /*- - * Copyright (c) 2001 Daniel Eischen . + * Copyright (c) 2001, 2003 Daniel Eischen . * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "atomic_ops.h" #include "lock.h" #define LCK_ASSERT assert #define MAX_SPINS 500 void _lock_destroy(struct lock *lck) { if ((lck != NULL) && (lck->l_head != NULL)) { free(lck->l_head); lck->l_head = NULL; lck->l_tail = NULL; } } int _lock_init(struct lock *lck, enum lock_type ltype, lock_handler_t *waitfunc, lock_handler_t *wakeupfunc) { if (lck == NULL) return (-1); else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL) return (-1); else { lck->l_type = ltype; lck->l_wait = waitfunc; lck->l_wakeup = wakeupfunc; lck->l_head->lr_locked = 0; lck->l_head->lr_watcher = NULL; lck->l_head->lr_owner = NULL; lck->l_head->lr_waiting = 0; lck->l_tail = lck->l_head; } return (0); } int _lockuser_init(struct lockuser *lu, void *priv) { if (lu == NULL) return (-1); else if ((lu->lu_myreq == NULL) && ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL)) return (-1); else { lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_watcher = NULL; lu->lu_myreq->lr_owner = lu; lu->lu_myreq->lr_waiting = 0; lu->lu_watchreq = NULL; lu->lu_priority = 0; lu->lu_private = priv; lu->lu_private2 = NULL; } return (0); } void _lockuser_destroy(struct lockuser *lu) { if ((lu != NULL) && (lu->lu_myreq != NULL)) free(lu->lu_myreq); } /* * Acquire a lock waiting (spin or sleep) for it to become available. */ void _lock_acquire(struct lock *lck, struct lockuser *lu, int prio) { int i; /** * XXX - We probably want to remove these checks to optimize * performance. It is also a bug if any one of the * checks fail, so it's probably better to just let it * SEGV and fix it. */ #if 0 if (lck == NULL || lu == NULL || lck->l_head == NULL) return; #endif if ((lck->l_type & LCK_PRIORITY) == 0) atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq); else { LCK_ASSERT(lu->lu_myreq->lr_locked == 1); LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL); LCK_ASSERT(lu->lu_myreq->lr_owner == lu); LCK_ASSERT(lu->lu_myreq->lr_waiting == 0); LCK_ASSERT(lu->lu_watchreq == NULL); lu->lu_priority = prio; /* * Atomically swap the head of the lock request with * this request. 
*/ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq); } if (lu->lu_watchreq->lr_locked != 0) { atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu); if ((lck->l_wait == NULL) || ((lck->l_type & LCK_ADAPTIVE) == 0)) { while (lu->lu_watchreq->lr_locked == 0) ; /* spin, then yield? */ } else { /* * Spin for a bit before invoking the wait function. * * We should be a little smarter here. If we're * running on a single processor, then the lock * owner got preempted and spinning will accomplish * nothing but waste time. If we're running on * multiple processors, the owner could be running * on another CPU and we might acquire the lock if * we spin for a bit. * * The other thing to keep in mind is that threads * acquiring these locks are considered to be in * critical regions; they will not be preempted by * the _UTS_ until they release the lock. It is * therefore safe to assume that if a lock can't * be acquired, it is currently held by a thread * running in another KSE. */ for (i = 0; i < MAX_SPINS; i++) { if (lu->lu_watchreq->lr_locked == 0) return; } atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1); while (lu->lu_watchreq->lr_locked != 0) lck->l_wait(lck, lu); atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0); } } } /* * Release a lock. */ void _lock_release(struct lock *lck, struct lockuser *lu) { struct lockuser *lu_tmp, *lu_h; struct lockreq *myreq; int prio_h; /** * XXX - We probably want to remove these checks to optimize * performance. It is also a bug if any one of the * checks fail, so it's probably better to just let it * SEGV and fix it. */ #if 0 if ((lck == NULL) || (lu == NULL)) return; #endif if ((lck->l_type & LCK_PRIORITY) != 0) { prio_h = 0; lu_h = NULL; /* Update tail if our request is last. */ if (lu->lu_watchreq->lr_owner == NULL) { atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq); atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL); } else { /* Remove ourselves from the list. */ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, lu->lu_watchreq->lr_owner); atomic_store_rel_ptr( &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq); } /* * The watch request now becomes our own because we've * traded away our previous request. Save our previous * request so that we can grant the lock. */ myreq = lu->lu_myreq; lu->lu_myreq = lu->lu_watchreq; lu->lu_watchreq = NULL; lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_owner = lu; lu->lu_myreq->lr_watcher = NULL; lu->lu_myreq->lr_waiting = 0; /* * Traverse the list of lock requests in reverse order * looking for the user with the highest priority. */ for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL; lu_tmp = lu_tmp->lu_myreq->lr_watcher) { if (lu_tmp->lu_priority > prio_h) { lu_h = lu_tmp; prio_h = lu_tmp->lu_priority; } } if (lu_h != NULL) { /* Give the lock to the highest priority user. */ atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0); if ((lu_h->lu_watchreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher); } else { /* Give the lock to the previous request. */ atomic_store_rel_long(&myreq->lr_locked, 0); if ((myreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, myreq->lr_watcher); } } else { /* * The watch request now becomes our own because we've * traded away our previous request. Save our previous * request so that we can grant the lock. 
*/ myreq = lu->lu_myreq; lu->lu_myreq = lu->lu_watchreq; lu->lu_watchreq = NULL; lu->lu_myreq->lr_locked = 1; lu->lu_myreq->lr_waiting = 0; /* Give the lock to the previous request. */ atomic_store_rel_long(&myreq->lr_locked, 0); if ((myreq->lr_waiting != 0) && (lck->l_wakeup != NULL)) /* Notify the sleeper */ lck->l_wakeup(lck, myreq->lr_watcher); } } Index: head/lib/libpthread/sys/lock.h =================================================================== --- head/lib/libpthread/sys/lock.h (revision 113660) +++ head/lib/libpthread/sys/lock.h (revision 113661) @@ -1,89 +1,89 @@ /* - * Copyright (c) 2001 Daniel Eischen . + * Copyright (c) 2001, 2003 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
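For LCK_ADAPTIVE locks, _lock_acquire() above spins for MAX_SPINS iterations and then hands control to the caller-supplied wait handler, re-invoking it until the request is granted; _lock_release() calls the wakeup handler when lr_waiting is set. A hedged example of a handler pair that would satisfy this contract outside the thread library (the real _thr_lock_wait/_thr_lock_wakeup block and unblock threads through the scheduler instead):

#include <time.h>
#include "lock.h"

static void
wait_handler(struct lock *lck, struct lockuser *lu)
{
	struct timespec ts = { 0, 100000 };	/* back off ~100us, then re-check */

	nanosleep(&ts, NULL);
}

static void
wakeup_handler(struct lock *lck, struct lockuser *lu)
{
	/* Nothing to do: the waiter re-checks lr_locked after its back-off. */
}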
* * $FreeBSD$ */ #ifndef _LOCK_H_ #define _LOCK_H_ struct lockreq; struct lockuser; struct lock; enum lock_type { LCK_DEFAULT = 0x0000, /* default is FIFO spin locks */ LCK_PRIORITY = 0x0001, LCK_ADAPTIVE = 0x0002 /* call user-supplied handlers */ }; typedef void lock_handler_t(struct lock *, struct lockuser *); struct lock { struct lockreq *l_head; struct lockreq *l_tail; /* only used for priority locks */ enum lock_type l_type; lock_handler_t *l_wait; /* only used for adaptive locks */ lock_handler_t *l_wakeup; /* only used for adaptive locks */ }; /* Try to make this >= CACHELINESIZE */ struct lockreq { volatile long lr_locked; /* lock granted = 0, busy otherwise */ struct lockuser *lr_watcher; /* only used for priority locks */ struct lockuser *lr_owner; /* only used for priority locks */ long lr_waiting; /* non-zero when wakeup needed */ }; struct lockuser { struct lockreq *lu_myreq; /* request to give up/trade */ struct lockreq *lu_watchreq; /* watch this request */ int lu_priority; /* only used for priority locks */ void *lu_private1; /* private{1,2} are initialized to */ void *lu_private2; /* NULL and can be used by caller */ #define lu_private lu_private1 }; #define _LCK_INITIALIZER(lck_req) { &lck_req, NULL, LCK_DEFAULT, \ NULL, NULL } #define _LCK_REQUEST_INITIALIZER { 0, NULL, NULL, 0 } #define _LCK_BUSY(lu) ((lu)->lu_watchreq->lr_locked != 0) #define _LCK_GRANTED(lu) ((lu)->lu_watchreq->lr_locked == 0) #define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p) #define _LCK_GET_PRIVATE(lu) (lu)->lu_private #define _LCK_SET_PRIVATE2(lu, p) (lu)->lu_private2 = (void *)(p) #define _LCK_GET_PRIVATE2(lu) (lu)->lu_private2 void _lock_destroy(struct lock *); int _lock_init(struct lock *, enum lock_type, lock_handler_t *, lock_handler_t *); int _lockuser_init(struct lockuser *lu, void *priv); void _lockuser_destroy(struct lockuser *lu); void _lock_acquire(struct lock *, struct lockuser *, int); void _lock_release(struct lock *, struct lockuser *); #endif Index: head/lib/libpthread/thread/thr_create.c =================================================================== --- head/lib/libpthread/thread/thr_create.c (revision 113660) +++ head/lib/libpthread/thread/thr_create.c (revision 113661) @@ -1,311 +1,312 @@ /* + * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
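Taking lock.h above as the public face of this low-level API, a minimal usage sketch for a plain FIFO spin lock (no priority handling, no wait/wakeup handlers; error paths trimmed, and the names mylock/me chosen for illustration) looks like:

#include "lock.h"

static struct lock	mylock;
static struct lockuser	me;		/* one lockuser per locking thread */

static void
example(void)
{
	if (_lock_init(&mylock, LCK_DEFAULT, NULL, NULL) != 0 ||
	    _lockuser_init(&me, NULL) != 0)
		return;

	_lock_acquire(&mylock, &me, 0);	/* priority is ignored for LCK_DEFAULT */
	/* ... critical section ... */
	_lock_release(&mylock, &me);

	_lockuser_destroy(&me);
	_lock_destroy(&mylock);
}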
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include "thr_private.h" #include "libc_private.h" static u_int64_t next_uniqueid = 1; #define OFF(f) offsetof(struct pthread, f) int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); int _thread_ctx_offset = OFF(tmbx.tm_context); #undef OFF int _thread_PS_RUNNING_value = PS_RUNNING; int _thread_PS_DEAD_value = PS_DEAD; static int create_stack(struct pthread_attr *pattr); static void thread_start(struct pthread *curthread, void *(*start_routine) (void *), void *arg); __weak_reference(_pthread_create, pthread_create); /* * Some notes on new thread creation and first time initializion * to enable multi-threading. * * There are basically two things that need to be done. * * 1) The internal library variables must be initialized. * 2) Upcalls need to be enabled to allow multiple threads * to be run. * * The first may be done as a result of other pthread functions * being called. When _thr_initial is null, _libpthread_init is * called to initialize the internal variables; this also creates * or sets the initial thread. It'd be nice to automatically * have _libpthread_init called on program execution so we don't * have to have checks throughout the library. * * The second part is only triggered by the creation of the first * thread (other than the initial/main thread). If the thread * being created is a scope system thread, then a new KSE/KSEG * pair needs to be allocated. Also, if upcalls haven't been * enabled on the initial thread's KSE, they must be now that * there is more than one thread; this could be delayed until * the initial KSEG has more than one thread. 
*/ int _pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { struct kse *curkse; struct pthread *curthread, *new_thread; struct kse *kse = NULL; struct kse_group *kseg = NULL; kse_critical_t crit; int i; int ret = 0; if (_thr_initial == NULL) _libpthread_init(NULL); crit = _kse_critical_enter(); curthread = _get_curthread(); curkse = curthread->kse; /* Allocate memory for the thread structure: */ - if ((new_thread = _thr_alloc(curkse)) == NULL) { + if ((new_thread = _thr_alloc(curthread)) == NULL) { /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { /* Initialize the thread structure: */ memset(new_thread, 0, sizeof(struct pthread)); /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) /* Use the default thread attributes: */ new_thread->attr = _pthread_attr_default; else new_thread->attr = *(*attr); if (create_stack(&new_thread->attr) != 0) { /* Insufficient memory to create a stack: */ ret = EAGAIN; - _thr_free(curkse, new_thread); + _thr_free(curthread, new_thread); } else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) && - (((kse = _kse_alloc(curkse)) == NULL) - || ((kseg = _kseg_alloc(curkse)) == NULL))) { + (((kse = _kse_alloc(curthread)) == NULL) + || ((kseg = _kseg_alloc(curthread)) == NULL))) { /* Insufficient memory to create a new KSE/KSEG: */ ret = EAGAIN; if (kse != NULL) - _kse_free(curkse, kse); + _kse_free(curthread, kse); if ((new_thread->attr.flags & THR_STACK_USER) == 0) { KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); _thr_stack_free(&new_thread->attr); KSE_LOCK_RELEASE(curkse, &_thread_list_lock); } - _thr_free(curkse, new_thread); + _thr_free(curthread, new_thread); } else { if (kseg != NULL) { /* Add the KSE to the KSEG's list of KSEs. 
*/ TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe); kse->k_kseg = kseg; kse->k_schedq = &kseg->kg_schedq; } /* * Write a magic value to the thread structure * to help identify valid ones: */ new_thread->magic = THR_MAGIC; new_thread->slice_usec = -1; new_thread->start_routine = start_routine; new_thread->arg = arg; new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; /* Initialize the thread for signals: */ new_thread->sigmask = curthread->sigmask; /* No thread is wanting to join to this one: */ new_thread->joiner = NULL; /* Initialize the signal frame: */ new_thread->curframe = NULL; /* Initialize the machine context: */ THR_GETCONTEXT(&new_thread->tmbx.tm_context); new_thread->tmbx.tm_udata = new_thread; new_thread->tmbx.tm_context.uc_sigmask = new_thread->sigmask; new_thread->tmbx.tm_context.uc_stack.ss_size = new_thread->attr.stacksize_attr; new_thread->tmbx.tm_context.uc_stack.ss_sp = new_thread->attr.stackaddr_attr; makecontext(&new_thread->tmbx.tm_context, (void (*)(void))thread_start, 4, new_thread, start_routine, arg); /* * Check if this thread is to inherit the scheduling * attributes from its parent: */ if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) { /* Copy the scheduling attributes: */ new_thread->base_priority = curthread->base_priority & ~THR_SIGNAL_PRIORITY; new_thread->attr.prio = curthread->base_priority & ~THR_SIGNAL_PRIORITY; new_thread->attr.sched_policy = curthread->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their * default values: */ new_thread->base_priority = new_thread->attr.prio; } new_thread->active_priority = new_thread->base_priority; new_thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&new_thread->mutexq); /* Initialize thread locking. */ if (_lock_init(&new_thread->lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread lock"); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_init(&new_thread->lockusers[i], (void *)new_thread); _LCK_SET_PRIVATE2(&new_thread->lockusers[i], (void *)new_thread); } /* Initialise hooks in the thread structure: */ new_thread->specific = NULL; new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->continuation = NULL; if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) new_thread->state = PS_SUSPENDED; else new_thread->state = PS_RUNNING; /* * System scope threads have their own kse and * kseg. Process scope threads are all hung * off the main process kseg. */ if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) { new_thread->kseg = _kse_initial->k_kseg; new_thread->kse = _kse_initial; } else { kse->k_curthread = NULL; kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD; new_thread->kse = kse; new_thread->kseg = kse->k_kseg; kse->k_mbx.km_udata = kse; kse->k_mbx.km_curthread = NULL; } KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); /* * Initialise the unique id which GDB uses to * track threads. */ new_thread->uniqueid = next_uniqueid++; /* Add the thread to the linked list of all threads: */ THR_LIST_ADD(new_thread); KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); /* * Schedule the new thread starting a new KSEG/KSE * pair if necessary. 
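The THR_GETCONTEXT()/makecontext() sequence above is the standard ucontext idiom: capture a template context, point its uc_stack at the new thread's stack, then rewrite it so execution starts in the chosen entry function. The same idiom in a free-standing form (stack size and function are illustrative; error checks trimmed):

#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, func_ctx;

static void
entry(void)
{
	/* Runs on its own stack; returns to main_ctx via uc_link. */
}

int
main(void)
{
	if (getcontext(&func_ctx) == -1)
		return (1);
	func_ctx.uc_stack.ss_sp = malloc(64 * 1024);
	func_ctx.uc_stack.ss_size = 64 * 1024;
	func_ctx.uc_link = &main_ctx;
	makecontext(&func_ctx, entry, 0);
	return (swapcontext(&main_ctx, &func_ctx) == -1);
}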
*/ _thr_schedule_add(curthread, new_thread); /* Return a pointer to the thread structure: */ (*thread) = new_thread; } } _kse_critical_leave(crit); if ((ret == 0) && (_kse_isthreaded() == 0)) _kse_setthreaded(1); /* Return the status: */ return (ret); } static int create_stack(struct pthread_attr *pattr) { int ret; /* Check if a stack was specified in the thread attributes: */ if ((pattr->stackaddr_attr) != NULL) { pattr->guardsize_attr = 0; pattr->flags = THR_STACK_USER; ret = 0; } else ret = _thr_stack_alloc(pattr); return (ret); } static void thread_start(struct pthread *curthread, void *(*start_routine) (void *), void *arg) { /* Run the current thread's start routine with argument: */ pthread_exit(start_routine(arg)); /* This point should never be reached. */ PANIC("Thread has resumed after exit"); } Index: head/lib/libpthread/thread/thr_detach.c =================================================================== --- head/lib/libpthread/thread/thr_detach.c (revision 113660) +++ head/lib/libpthread/thread/thr_detach.c (revision 113661) @@ -1,102 +1,126 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
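create_stack() above only allocates a stack when the attribute object does not already carry one; an application-supplied stack is flagged THR_STACK_USER, gets no guard page, and is never freed by the library. From the application side, the case it short-circuits looks roughly like this (pthread_attr_setstackaddr()/pthread_attr_setstacksize() are the classic interfaces; the exact interpretation of the supplied address is platform-specific, and the caller owns the memory for the thread's lifetime):

#include <pthread.h>
#include <stdlib.h>

static int
spawn_on_my_stack(pthread_t *td, void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	size_t size = 256 * 1024;
	void *stack = malloc(size);	/* stays allocated for the thread's life */

	if (stack == NULL)
		return (-1);
	pthread_attr_init(&attr);
	pthread_attr_setstackaddr(&attr, stack);
	pthread_attr_setstacksize(&attr, size);
	return (pthread_create(td, &attr, fn, arg));
}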
* * $FreeBSD$ */ #include #include #include #include #include "thr_private.h" __weak_reference(_pthread_detach, pthread_detach); int _pthread_detach(pthread_t pthread) { - struct pthread *curthread, *joiner; + struct pthread *curthread = _get_curthread(); + struct pthread *joiner; + kse_critical_t crit; + int dead; int rval = 0; /* Check for invalid calling parameters: */ if (pthread == NULL || pthread->magic != THR_MAGIC) /* Return an invalid argument error: */ rval = EINVAL; + else if ((rval = _thr_ref_add(curthread, pthread, + /*include dead*/1)) != 0) { + /* Return an error: */ + _thr_leave_cancellation_point(curthread); + } + /* Check if the thread is already detached: */ - else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) + else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { /* Return an error: */ + _thr_ref_delete(curthread, pthread); rval = EINVAL; - else { + } else { /* Lock the detached thread: */ - curthread = _get_curthread(); THR_SCHED_LOCK(curthread, pthread); /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; /* Retrieve any joining thread and remove it: */ joiner = pthread->joiner; pthread->joiner = NULL; + if (joiner->kseg == pthread->kseg) { + /* + * We already own the scheduler lock for the joiner. + * Take advantage of that and make the joiner runnable. + */ + if (joiner->join_status.thread == pthread) { + /* + * Set the return value for the woken thread: + */ + joiner->join_status.error = ESRCH; + joiner->join_status.ret = NULL; + joiner->join_status.thread = NULL; - /* We are already in a critical region. */ - KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); - if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) { - THR_LIST_REMOVE(pthread); - THR_GCLIST_ADD(pthread); - atomic_store_rel_int(&_gc_check, 1); - if (KSE_WAITING(_kse_initial)) - KSE_WAKEUP(_kse_initial); + _thr_setrunnable_unlocked(joiner); + } + joiner = NULL; } - KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); - + dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0; THR_SCHED_UNLOCK(curthread, pthread); + + if (dead != 0) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + THR_GCLIST_ADD(pthread); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); + } + _thr_ref_delete(curthread, pthread); /* See if there is a thread waiting in pthread_join(): */ if (joiner != NULL) { /* Lock the joiner before fiddling with it. */ THR_SCHED_LOCK(curthread, joiner); if (joiner->join_status.thread == pthread) { /* * Set the return value for the woken thread: */ joiner->join_status.error = ESRCH; joiner->join_status.ret = NULL; joiner->join_status.thread = NULL; _thr_setrunnable_unlocked(joiner); } THR_SCHED_UNLOCK(curthread, joiner); } } /* Return the completion status: */ return (rval); } Index: head/lib/libpthread/thread/thr_find_thread.c =================================================================== --- head/lib/libpthread/thread/thr_find_thread.c (revision 113660) +++ head/lib/libpthread/thread/thr_find_thread.c (revision 113661) @@ -1,100 +1,91 @@ /* * Copyright (c) 2003 Daniel Eischen * Copyright (c) 1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" /* * Find a thread in the linked list of active threads and add a reference * to it. Threads with positive reference counts will not be deallocated * until all references are released. */ int _thr_ref_add(struct pthread *curthread, struct pthread *thread, int include_dead) { kse_critical_t crit; struct pthread *pthread; if (thread == NULL) /* Invalid thread: */ return (EINVAL); crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(pthread, &_thread_list, tle) { if (pthread == thread) { if ((include_dead == 0) && ((pthread->state == PS_DEAD) || ((pthread->state == PS_DEADLOCK) || ((pthread->flags & THR_FLAGS_EXITING) != 0)))) pthread = NULL; else { thread->refcount++; curthread->critical_count++; } break; } } KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); /* Return zero if the thread exists: */ return ((pthread != NULL) ? 0 : ESRCH); } void _thr_ref_delete(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; if (thread != NULL) { crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); thread->refcount--; curthread->critical_count--; - if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) && - (thread->refcount == 0) && - ((thread->attr.flags & PTHREAD_DETACHED) != 0)) { - THR_LIST_REMOVE(thread); - THR_GCLIST_ADD(thread); - _gc_check = 1; - if (KSE_WAITING(_kse_initial)) - KSE_WAKEUP(_kse_initial); - } KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } } Index: head/lib/libpthread/thread/thr_init.c =================================================================== --- head/lib/libpthread/thread/thr_init.c (revision 113660) +++ head/lib/libpthread/thread/thr_init.c (revision 113661) @@ -1,519 +1,519 @@ /* - * Copyright (c) 2003 Daniel M. Eischen + * Copyright (c) 2003 Daniel M. Eischen * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
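_thr_ref_add() and _thr_ref_delete() above give callers a way to pin a thread structure so it cannot be reclaimed while it is being examined; the garbage-collection hand-off that used to live in _thr_ref_delete() now happens explicitly in the detach and join paths. A hedged sketch of the calling pattern (operate_on and the commented body are illustrative only):

#include "thr_private.h"

static int
operate_on(struct pthread *curthread, struct pthread *target)
{
	int error;

	/* Pin the target; fails with ESRCH if it no longer exists. */
	if ((error = _thr_ref_add(curthread, target, /*include dead*/ 1)) != 0)
		return (error);

	THR_SCHED_LOCK(curthread, target);
	/* ... examine or modify the pinned thread ... */
	THR_SCHED_UNLOCK(curthread, target);

	/* Always drop the reference on the way out. */
	_thr_ref_delete(curthread, target);
	return (0);
}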
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Allocate space for global thread variables here: */ #define GLOBAL_PTHREAD_PRIVATE #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" #include "thr_private.h" #include "ksd.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); static void init_private(void); static void init_main_thread(struct pthread *thread); /* * All weak references used within libc should be in this table. * This is so that static libraries will work. */ static void *references[] = { &_accept, &_bind, &_close, &_connect, &_dup, &_dup2, &_execve, &_fcntl, &_flock, &_flockfile, &_fstat, &_fstatfs, &_fsync, &_funlockfile, &_getdirentries, &_getlogin, &_getpeername, &_getsockname, &_getsockopt, &_ioctl, &_kevent, &_listen, &_nanosleep, &_open, &_pthread_getspecific, &_pthread_key_create, &_pthread_key_delete, &_pthread_mutex_destroy, &_pthread_mutex_init, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock, &_pthread_mutexattr_init, &_pthread_mutexattr_destroy, &_pthread_mutexattr_settype, &_pthread_once, &_pthread_setspecific, &_read, &_readv, &_recvfrom, &_recvmsg, &_select, &_sendmsg, &_sendto, &_setsockopt, &_sigaction, &_sigprocmask, &_sigsuspend, &_socket, &_socketpair, &_wait4, &_write, &_writev }; /* * These are needed when linking statically. All references within * libgcc (and in the future libc) to these routines are weak, but * if they are not (strongly) referenced by the application or other * libraries, then the actual functions will not be loaded. 
*/ static void *libgcc_references[] = { &_pthread_once, &_pthread_key_create, &_pthread_key_delete, &_pthread_getspecific, &_pthread_setspecific, &_pthread_mutex_init, &_pthread_mutex_destroy, &_pthread_mutex_lock, &_pthread_mutex_trylock, &_pthread_mutex_unlock }; #define DUAL_ENTRY(entry) \ (pthread_func_t)entry, (pthread_func_t)entry static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */ {(pthread_func_t)__pthread_cond_wait, (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ {(pthread_func_t)__pthread_mutex_lock, (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ {(pthread_func_t)__pthread_mutex_trylock, (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */ {DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */ }; static int init_once = 0; /* * Threaded process initialization. * * This is only called under two conditions: * * 1) Some thread routines have detected that the library hasn't yet * been initialized (_thr_initial == NULL && curthread == NULL), or * * 2) An explicit call to reinitialize after a fork (indicated * by curthread != NULL) */ void _libpthread_init(struct pthread *curthread) { int fd; /* Check if this function has already been called: */ if ((_thr_initial != NULL) && (curthread == NULL)) /* Only initialize the threaded application once. */ return; /* * Make gcc quiescent about {,libgcc_}references not being * referenced: */ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. */ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); /* * Check for the special case of this process running as * or in place of init as pid = 1: */ if ((_thr_pid = getpid()) == 1) { /* * Setup a new session for this process which is * assumed to be running as root. 
*/ if (setsid() == -1) PANIC("Can't set session ID"); if (revoke(_PATH_CONSOLE) != 0) PANIC("Can't revoke console"); if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0) PANIC("Can't open console"); if (setlogin("root") == -1) PANIC("Can't set login to root"); if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1) PANIC("Can't set controlling terminal"); } /* Initialize pthread private data. */ init_private(); _kse_init(); /* Initialize the initial kse and kseg. */ _kse_initial = _kse_alloc(NULL); if (_kse_initial == NULL) PANIC("Can't allocate initial kse."); _kse_initial->k_kseg = _kseg_alloc(NULL); if (_kse_initial->k_kseg == NULL) PANIC("Can't allocate initial kseg."); _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq; /* Set the initial thread. */ if (curthread == NULL) { /* Create and initialize the initial thread. */ curthread = _thr_alloc(NULL); if (curthread == NULL) PANIC("Can't allocate initial thread"); _thr_initial = curthread; init_main_thread(curthread); } else { /* * The initial thread is the current thread. It is * assumed that the current thread is already initialized * because it is left over from a fork(). */ _thr_initial = curthread; } - _kse_initial->k_kseg->kg_threadcount = 1; + _kse_initial->k_kseg->kg_threadcount = 0; _thr_initial->kse = _kse_initial; _thr_initial->kseg = _kse_initial->k_kseg; _thr_initial->active = 1; /* * Add the thread to the thread list and to the KSEG's thread * queue. */ THR_LIST_ADD(_thr_initial); - TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle); + KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0) PANIC("Can't set initial KSE specific data"); _set_curkse(_thr_initial->kse); _thr_initial->kse->k_curthread = _thr_initial; _thr_initial->kse->k_flags |= KF_INITIALIZED; _kse_initial->k_curthread = _thr_initial; } /* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { int i; /* Zero the initial thread structure. */ memset(thread, 0, sizeof(struct pthread)); /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; /* * Set up the thread stack. * * Create a red zone below the main stack. All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap((void *)_usrstack - THR_STACK_INITIAL - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. 
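The mmap() call in init_main_thread() above creates the red zone the comment describes: an anonymous, inaccessible mapping just below the lowest address the main stack may grow to, so that running off the end faults immediately instead of silently overwriting whatever lies beyond. The protection argument of 0 is the same as PROT_NONE; spelled out, the idea is:

#include <sys/mman.h>

static int
map_red_zone(void *below_stack, size_t guardsize)
{
	/*
	 * Ask for an inaccessible anonymous mapping at the hinted address.
	 * Like the in-tree code, this relies on the hint being honoured;
	 * MAP_FIXED is not used.
	 */
	if (mmap(below_stack, guardsize, PROT_NONE, MAP_ANON, -1, 0) ==
	    MAP_FAILED)
		return (-1);
	return (0);
}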
*/ thread->attr.stackaddr_attr = (void *)_usrstack - THR_STACK_INITIAL; thread->attr.stacksize_attr = THR_STACK_INITIAL; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->slice_usec = -1; thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED; thread->name = strdup("initial thread"); /* Initialize the thread for signals: */ sigemptyset(&thread->sigmask); /* * Set up the thread mailbox. The threads saved context * is also in the mailbox. */ thread->tmbx.tm_udata = thread; thread->tmbx.tm_context.uc_sigmask = thread->sigmask; thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; thread->active_priority = THR_DEFAULT_PRIORITY; thread->inherited_priority = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); /* Initialize thread locking. */ if (_lock_init(&thread->lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize initial thread lock"); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_init(&thread->lockusers[i], (void *)thread); _LCK_SET_PRIVATE2(&thread->lockusers[i], (void *)thread); } /* Initialize hooks in the thread structure: */ thread->specific = NULL; thread->cleanup = NULL; thread->flags = 0; thread->continuation = NULL; thread->state = PS_RUNNING; thread->uniqueid = 0; } static void init_private(void) { struct clockinfo clockinfo; struct sigaction act; size_t len; int mib[2]; int i; /* * Avoid reinitializing some things if they don't need to be, * e.g. after a fork(). */ if (init_once == 0) { /* Find the stack top */ mib[0] = CTL_KERN; mib[1] = KERN_USRSTACK; len = sizeof (_usrstack); if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1) PANIC("Cannot get kern.usrstack from sysctl"); /* * Create a red zone below the main stack. All other * stacks are constrained to a maximum size by the * parameters passed to mmap(), but this stack is only * limited by resource limits, so this stack needs an * explicitly mapped red zone to protect the thread stack * that is just beyond. */ if (mmap((void *)_usrstack - THR_STACK_INITIAL - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* Get the kernel clockrate: */ mib[0] = CTL_KERN; mib[1] = KERN_CLOCKRATE; len = sizeof (struct clockinfo); if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0) _clock_res_usec = clockinfo.tick; else _clock_res_usec = CLOCK_RES_USEC; _thr_page_size = getpagesize(); _thr_guard_default = _thr_page_size; init_once = 1; /* Don't do this again. */ } else { /* * Destroy the locks before creating them. We don't * know what state they are in so it is better to just * recreate them. */ _lock_destroy(&_thread_signal_lock); _lock_destroy(&_mutex_static_lock); _lock_destroy(&_rwlock_static_lock); _lock_destroy(&_keytable_lock); } /* Initialize everything else. 
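init_private() above seeds _usrstack and the clock resolution from the kernel via sysctl(CTL_KERN, KERN_USRSTACK) and (CTL_KERN, KERN_CLOCKRATE). The same lookup can also be done by name, which is handier when experimenting outside the library; a hedged, standalone illustration:

#include <sys/types.h>
#include <sys/sysctl.h>

static unsigned long
get_usrstack(void)
{
	unsigned long us = 0;
	size_t len = sizeof(us);

	/* kern.usrstack is the top of the main process stack. */
	if (sysctlbyname("kern.usrstack", &us, &len, NULL, 0) == -1)
		return (0);
	return (us);
}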
*/ TAILQ_INIT(&_thread_list); TAILQ_INIT(&_thread_gc_list); /* Enter a loop to get the existing signal status: */ for (i = 1; i < NSIG; i++) { /* Check for signals which cannot be trapped: */ if (i == SIGKILL || i == SIGSTOP) { } /* Get the signal handler details: */ else if (__sys_sigaction(i, NULL, &_thread_sigact[i - 1]) != 0) { /* * Abort this process if signal * initialisation fails: */ PANIC("Cannot read signal handler info"); } /* Initialize the SIG_DFL dummy handler count. */ _thread_dfl_count[i] = 0; } /* * Install the signal handler for SIGINFO. It isn't * really needed, but it is nice to have for debugging * purposes. */ if (__sys_sigaction(SIGINFO, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ PANIC("Cannot initialize signal handler"); } _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART; /* * Initialize the lock for temporary installation of signal * handlers (to support sigwait() semantics) and for the * process signal mask and pending signal sets. */ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize _thread_signal_lock"); if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize mutex static init lock"); if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize rwlock static init lock"); if (_lock_init(&_keytable_lock, LCK_ADAPTIVE, _thr_lock_wait, _thr_lock_wakeup) != 0) PANIC("Cannot initialize thread specific keytable lock"); /* Clear pending signals and get the process signal mask. */ sigemptyset(&_thr_proc_sigpending); __sys_sigprocmask(SIG_SETMASK, NULL, &_thr_proc_sigmask); /* * _thread_list_lock and _kse_count are initialized * by _kse_init() */ } Index: head/lib/libpthread/thread/thr_join.c =================================================================== --- head/lib/libpthread/thread/thr_join.c (revision 113660) +++ head/lib/libpthread/thread/thr_join.c (revision 113661) @@ -1,132 +1,149 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" __weak_reference(_pthread_join, pthread_join); int _pthread_join(pthread_t pthread, void **thread_return) { - struct pthread *curthread = _get_curthread(); - int ret = 0; + struct pthread *curthread = _get_curthread(); + kse_critical_t crit; + int ret = 0; _thr_enter_cancellation_point(curthread); /* Check if the caller has specified an invalid thread: */ if (pthread == NULL || pthread->magic != THR_MAGIC) { /* Invalid thread: */ _thr_leave_cancellation_point(curthread); return (EINVAL); } /* Check if the caller has specified itself: */ if (pthread == curthread) { /* Avoid a deadlock condition: */ _thr_leave_cancellation_point(curthread); return (EDEADLK); } /* * Find the thread in the list of active threads or in the * list of dead threads: */ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) { /* Return an error: */ _thr_leave_cancellation_point(curthread); return (ESRCH); } /* Check if this thread has been detached: */ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) { /* Remove the reference and return an error: */ _thr_ref_delete(curthread, pthread); ret = ESRCH; } else { /* Lock the target thread while checking its state. */ THR_SCHED_LOCK(curthread, pthread); if ((pthread->state == PS_DEAD) || ((pthread->flags & THR_FLAGS_EXITING) != 0)) { if (thread_return != NULL) /* Return the thread's return value: */ *thread_return = pthread->ret; - /* Unlock the thread and remove the reference. */ + /* Detach the thread. */ + pthread->attr.flags |= PTHREAD_DETACHED; + + /* Unlock the thread. */ THR_SCHED_UNLOCK(curthread, pthread); + + /* + * Remove the thread from the list of active + * threads and add it to the GC list. + */ + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + THR_LIST_REMOVE(pthread); + THR_GCLIST_ADD(pthread); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); + + /* Remove the reference. */ _thr_ref_delete(curthread, pthread); } else if (pthread->joiner != NULL) { /* Unlock the thread and remove the reference. */ THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); /* Multiple joiners are not supported. */ ret = ENOTSUP; } else { /* Set the running thread to be the joiner: */ pthread->joiner = curthread; /* Keep track of which thread we're joining to: */ curthread->join_status.thread = pthread; /* Unlock the thread and remove the reference. 
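Seen from an application, the join protocol implemented here behaves as expected: self-join is rejected to avoid deadlock, joining a detached thread fails, only one joiner is allowed, and the target's return value is handed back once it exits. A minimal usage sketch; the worker function and its result are hypothetical:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	/* Hypothetical unit of work; the pointer-sized result goes to the joiner. */
	return ((void *)((intptr_t)arg + 41));
}

int
main(void)
{
	pthread_t td;
	void *result;
	int error;

	if ((error = pthread_create(&td, NULL, worker, (void *)(intptr_t)1)) != 0)
		return (error);

	/* Blocks (state PS_JOIN above) until worker() returns. */
	if ((error = pthread_join(td, &result)) != 0)
		return (error);

	printf("worker returned %ld\n", (long)(intptr_t)result);
	return (0);
}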
*/ THR_SCHED_UNLOCK(curthread, pthread); _thr_ref_delete(curthread, pthread); THR_SCHED_LOCK(curthread, curthread); if (curthread->join_status.thread == pthread) THR_SET_STATE(curthread, PS_JOIN); THR_SCHED_UNLOCK(curthread, curthread); while (curthread->join_status.thread == pthread) { /* Schedule the next thread: */ _thr_sched_switch(curthread); } /* * The thread return value and error are set by the * thread we're joining to when it exits or detaches: */ ret = curthread->join_status.error; if ((ret == 0) && (thread_return != NULL)) *thread_return = curthread->join_status.ret; } } _thr_leave_cancellation_point(curthread); /* Return the completion status: */ return (ret); } Index: head/lib/libpthread/thread/thr_kern.c =================================================================== --- head/lib/libpthread/thread/thr_kern.c (revision 113660) +++ head/lib/libpthread/thread/thr_kern.c (revision 113661) @@ -1,1804 +1,1850 @@ /* * Copyright (C) 2003 Daniel M. Eischen * Copyright (C) 2002 Jonathon Mini * Copyright (c) 1995-1998 John Birrell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include __FBSDID("$FreeBSD"); #include #include #include #include #include #include #include #include #include #include #include #include #include "atomic_ops.h" #include "thr_private.h" #include "pthread_md.h" #include "libc_private.h" /*#define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else #define DBG_MSG(x...) #endif /* * Define a high water mark for the maximum number of threads that * will be cached. Once this level is reached, any extra threads * will be free()'d. * * XXX - It doesn't make sense to worry about the maximum number of * KSEs that we can cache because the system will limit us to * something *much* less than the maximum number of threads * that we can have. 
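The high-water-mark cache described in this comment shows up in _thr_alloc()/_thr_free() near the end of this file: freed thread structures are kept on a free list until MAX_CACHED_THREADS of them are cached, and any excess is handed back to free(). A small generic sketch of the same pattern outside the library; the node type and limit are made up for illustration:

#include <sys/queue.h>
#include <stdlib.h>

#define	MAX_CACHED	100	/* high-water mark, like MAX_CACHED_THREADS */

struct node {
	TAILQ_ENTRY(node) link;
	/* ... payload ... */
};

static TAILQ_HEAD(, node) free_nodes = TAILQ_HEAD_INITIALIZER(free_nodes);
static int free_count;

static struct node *
node_alloc(void)
{
	struct node *n;

	/* Prefer a cached structure; fall back to malloc(). */
	if ((n = TAILQ_FIRST(&free_nodes)) != NULL) {
		TAILQ_REMOVE(&free_nodes, n, link);
		free_count--;
	} else
		n = malloc(sizeof(*n));
	return (n);
}

static void
node_free(struct node *n)
{
	/* Cache up to the high-water mark; release the rest. */
	if (free_count >= MAX_CACHED)
		free(n);
	else {
		TAILQ_INSERT_HEAD(&free_nodes, n, link);
		free_count++;
	}
}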
Disregarding KSEs in their own group, * the maximum number of KSEs is the number of processors in * the system. */ #define MAX_CACHED_THREADS 100 #define KSE_STACKSIZE 16384 #define KSE_SET_MBOX(kse, thrd) \ (kse)->k_mbx.km_curthread = &(thrd)->tmbx #define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED /* - * Add/remove threads from a KSE's scheduling queue. - * For now the scheduling queue is hung off the KSEG. - */ -#define KSEG_THRQ_ADD(kseg, thr) \ - TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle) -#define KSEG_THRQ_REMOVE(kseg, thr) \ - TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle) - - -/* * Macros for manipulating the run queues. The priority queue * routines use the thread's pqe link and also handle the setting * and clearing of the thread's THR_FLAGS_IN_RUNQ flag. */ #define KSE_RUNQ_INSERT_HEAD(kse, thrd) \ _pq_insert_head(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_INSERT_TAIL(kse, thrd) \ _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_REMOVE(kse, thrd) \ _pq_remove(&(kse)->k_schedq->sq_runq, thrd) #define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq) /* * We've got to keep track of everything that is allocated, not only * to have a speedy free list, but also so they can be deallocated * after a fork(). */ static TAILQ_HEAD(, kse) active_kseq; static TAILQ_HEAD(, kse) free_kseq; static TAILQ_HEAD(, kse_group) free_kse_groupq; static TAILQ_HEAD(, kse_group) active_kse_groupq; +static TAILQ_HEAD(, kse_group) gc_ksegq; static struct lock kse_lock; /* also used for kseg queue */ static int free_kse_count = 0; static int free_kseg_count = 0; static TAILQ_HEAD(, pthread) free_threadq; static struct lock thread_lock; static int free_thread_count = 0; static int inited = 0; static int active_kse_count = 0; static int active_kseg_count = 0; static void kse_check_completed(struct kse *kse); static void kse_check_waitq(struct kse *kse); static void kse_check_signals(struct kse *kse); static void kse_entry(struct kse_mailbox *mbx); static void kse_fini(struct kse *curkse); static void kse_sched_multi(struct kse *curkse); static void kse_sched_single(struct kse *curkse); static void kse_switchout_thread(struct kse *kse, struct pthread *thread); static void kse_wait(struct kse *kse); +static void kse_free_unlocked(struct kse *kse); static void kseg_free(struct kse_group *kseg); static void kseg_init(struct kse_group *kseg); static void kse_waitq_insert(struct pthread *thread); static void thr_cleanup(struct kse *kse, struct pthread *curthread); -static void thr_gc(struct kse *curkse); +#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp); +#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf); static int thr_timedout(struct pthread *thread, struct timespec *curtime); /* * This is called after a fork(). * No locks need to be taken here since we are guaranteed to be * single threaded. */ void _kse_single_thread(struct pthread *curthread) { struct kse *kse, *kse_next; struct kse_group *kseg, *kseg_next; struct pthread *thread, *thread_next; kse_critical_t crit; int i; /* * Disable upcalls and clear the threaded flag. * XXX - I don't think we need to disable upcalls after a fork(). * but it doesn't hurt. 
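The fork() wrapper that ends up calling _kse_single_thread() is not part of this diff. The sketch below only illustrates the calling convention implied by the header comment (child side of fork(), exactly one thread left, hence no locking); the wrapper's name and the __sys_fork() spelling are assumptions made by analogy with the other __sys_* wrappers used in this file:

#include <sys/types.h>
#include "thr_private.h"

/* Hypothetical child-side hook; not the library's actual fork() wrapper. */
pid_t
fork_wrapper_sketch(void)
{
	struct pthread *curthread = _get_curthread();
	pid_t pid;

	if ((pid = __sys_fork()) == 0) {
		/*
		 * Only the forking thread survives in the child, so the
		 * leftover KSEs, KSEGs and cached thread structures can
		 * be reclaimed without taking any locks.
		 */
		_kse_single_thread(curthread);
	}
	return (pid);
}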
*/ crit = _kse_critical_enter(); __isthreaded = 0; /* * Enter a loop to remove and free all threads other than * the running thread from the active thread list: */ for (thread = TAILQ_FIRST(&_thread_list); thread != NULL; thread = thread_next) { /* * Advance to the next thread before the destroying * the current thread. */ thread_next = TAILQ_NEXT(thread, tle); /* * Remove this thread from the list (the current * thread will be removed but re-added by libpthread * initialization. */ TAILQ_REMOVE(&_thread_list, thread, tle); /* Make sure this isn't the running thread: */ if (thread != curthread) { _thr_stack_free(&thread->attr); if (thread->specific != NULL) free(thread->specific); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_destroy(&thread->lockusers[i]); } _lock_destroy(&thread->lock); free(thread); } } TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */ curthread->joiner = NULL; /* no joining threads yet */ sigemptyset(&curthread->sigpend); /* clear pending signals */ if (curthread->specific != NULL) { free(curthread->specific); curthread->specific = NULL; curthread->specific_data_count = 0; } /* Free the free KSEs: */ while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) { TAILQ_REMOVE(&free_kseq, kse, k_qe); _ksd_destroy(&kse->k_ksd); free(kse); } free_kse_count = 0; /* Free the active KSEs: */ for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) { kse_next = TAILQ_NEXT(kse, k_qe); TAILQ_REMOVE(&active_kseq, kse, k_qe); for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_destroy(&kse->k_lockusers[i]); } _lock_destroy(&kse->k_lock); free(kse); } active_kse_count = 0; /* Free the free KSEGs: */ while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) { TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe); _lock_destroy(&kseg->kg_lock); + _pq_free(&kseg->kg_schedq.sq_runq); free(kseg); } free_kseg_count = 0; /* Free the active KSEGs: */ for (kseg = TAILQ_FIRST(&active_kse_groupq); kseg != NULL; kseg = kseg_next) { kseg_next = TAILQ_NEXT(kseg, kg_qe); TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe); _lock_destroy(&kseg->kg_lock); + _pq_free(&kseg->kg_schedq.sq_runq); free(kseg); } active_kseg_count = 0; /* Free the free threads. */ while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) { TAILQ_REMOVE(&free_threadq, thread, tle); if (thread->specific != NULL) free(thread->specific); for (i = 0; i < MAX_THR_LOCKLEVEL; i++) { _lockuser_destroy(&thread->lockusers[i]); } _lock_destroy(&thread->lock); free(thread); } free_thread_count = 0; /* Free the to-be-gc'd threads. */ while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) { - TAILQ_REMOVE(&_thread_gc_list, thread, tle); + TAILQ_REMOVE(&_thread_gc_list, thread, gcle); free(thread); } + TAILQ_INIT(&gc_ksegq); + _gc_count = 0; if (inited != 0) { /* * Destroy these locks; they'll be recreated to assure they * are in the unlocked state. */ _lock_destroy(&kse_lock); _lock_destroy(&thread_lock); _lock_destroy(&_thread_list_lock); inited = 0; } /* * After a fork(), the leftover thread goes back to being * scope process. */ curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM; curthread->attr.flags |= PTHREAD_SCOPE_PROCESS; /* * After a fork, we are still operating on the thread's original * stack. Don't clear the THR_FLAGS_USER from the thread's * attribute flags. */ /* Initialize the threads library. */ curthread->kse = NULL; curthread->kseg = NULL; _kse_initial = NULL; _libpthread_init(curthread); } /* * This is used to initialize housekeeping and to initialize the * KSD for the KSE. 
*/ void _kse_init(void) { if (inited == 0) { TAILQ_INIT(&active_kseq); TAILQ_INIT(&active_kse_groupq); TAILQ_INIT(&free_kseq); TAILQ_INIT(&free_kse_groupq); TAILQ_INIT(&free_threadq); + TAILQ_INIT(&gc_ksegq); if (_lock_init(&kse_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize free KSE queue lock"); if (_lock_init(&thread_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize free thread queue lock"); if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup) != 0) PANIC("Unable to initialize thread list lock"); active_kse_count = 0; active_kseg_count = 0; + _gc_count = 0; inited = 1; } } int _kse_isthreaded(void) { return (__isthreaded != 0); } /* * This is called when the first thread (other than the initial * thread) is created. */ void _kse_setthreaded(int threaded) { if ((threaded != 0) && (__isthreaded == 0)) { /* * Locking functions in libc are required when there are * threads other than the initial thread. */ __isthreaded = 1; /* * Tell the kernel to create a KSE for the initial thread * and enable upcalls in it. */ kse_create(&_kse_initial->k_mbx, 0); KSE_SET_MBOX(_kse_initial, _thr_initial); } } /* * Lock wait and wakeup handlers for KSE locks. These are only used by * KSEs, and should never be used by threads. KSE locks include the * KSE group lock (used for locking the scheduling queue) and the * kse_lock defined above. * * When a KSE lock attempt blocks, the entire KSE blocks allowing another * KSE to run. For the most part, it doesn't make much sense to try and * schedule another thread because you need to lock the scheduling queue * in order to do that. And since the KSE lock is used to lock the scheduling * queue, you would just end up blocking again. */ void _kse_lock_wait(struct lock *lock, struct lockuser *lu) { struct kse *curkse = (struct kse *)_LCK_GET_PRIVATE(lu); struct timespec ts; kse_critical_t crit; /* * Enter a loop to wait until we get the lock. */ ts.tv_sec = 0; ts.tv_nsec = 1000000; /* 1 sec */ KSE_SET_WAIT(curkse); while (_LCK_BUSY(lu)) { /* * Yield the kse and wait to be notified when the lock * is granted. */ crit = _kse_critical_enter(); __sys_nanosleep(&ts, NULL); _kse_critical_leave(crit); /* * Make sure that the wait flag is set again in case * we wokeup without the lock being granted. */ KSE_SET_WAIT(curkse); } KSE_CLEAR_WAIT(curkse); } void _kse_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct kse *curkse; struct kse *kse; curkse = _get_curkse(); kse = (struct kse *)_LCK_GET_PRIVATE(lu); if (kse == curkse) PANIC("KSE trying to wake itself up in lock"); else if (KSE_WAITING(kse)) { /* * Notify the owning kse that it has the lock. */ KSE_WAKEUP(kse); } } /* * Thread wait and wakeup handlers for thread locks. These are only used * by threads, never by KSEs. Thread locks include the per-thread lock * (defined in its structure), and condition variable and mutex locks. */ void _thr_lock_wait(struct lock *lock, struct lockuser *lu) { struct pthread *curthread = (struct pthread *)lu->lu_private; int count; /* * Spin for a bit. * * XXX - We probably want to make this a bit smarter. It * doesn't make sense to spin unless there is more * than 1 CPU. A thread that is holding one of these * locks is prevented from being swapped out for another * thread within the same scheduling entity. 
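Earlier in this file, _kse_setthreaded() enables upcalls for the initial KSE by calling kse_create() on a mailbox that is already filled in. For a brand-new KSE the mailbox has to be populated first, which _kse_alloc() does further down. A condensed sketch of that setup, with the error handling trimmed and the helper name made up; the field values mirror the ones used in this file:

#include <stdlib.h>
#include <strings.h>
#include "thr_private.h"

/* Hypothetical helper; condensed from _kse_alloc() and _kse_setthreaded(). */
static struct kse *
kse_start_sketch(void)
{
	struct kse *kse;

	if ((kse = malloc(sizeof(*kse))) == NULL)
		return (NULL);
	bzero(kse, sizeof(*kse));

	/* Upcall entry point, upcall stack and a back pointer for the kernel. */
	kse->k_mbx.km_func = kse_entry;
	kse->k_mbx.km_stack.ss_sp = malloc(KSE_STACKSIZE);
	kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
	kse->k_mbx.km_udata = (void *)kse;
	kse->k_mbx.km_quantum = 20000;	/* usec, same value _kse_alloc() uses */

	if (kse->k_mbx.km_stack.ss_sp == NULL ||
	    kse_create(&kse->k_mbx, 0) != 0) {
		free(kse->k_mbx.km_stack.ss_sp);
		free(kse);
		return (NULL);
	}
	return (kse);
}

Passing 0 as the second argument of kse_create() keeps the new KSE in the caller's group, as the non-scope-system path of _thr_schedule_add() does; scope-system threads pass 1 to get a KSE group of their own.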
*/ count = 0; while (_LCK_BUSY(lu) && count < 300) count++; while (_LCK_BUSY(lu)) { THR_SCHED_LOCK(curthread, curthread); if (_LCK_BUSY(lu)) { /* Wait for the lock: */ atomic_store_rel_int(&curthread->need_wakeup, 1); THR_SET_STATE(curthread, PS_LOCKWAIT); THR_SCHED_UNLOCK(curthread, curthread); _thr_sched_switch(curthread); } else THR_SCHED_UNLOCK(curthread, curthread); } } void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu) { struct pthread *thread; struct pthread *curthread; curthread = _get_curthread(); thread = (struct pthread *)_LCK_GET_PRIVATE(lu); THR_SCHED_LOCK(curthread, thread); _thr_setrunnable_unlocked(thread); atomic_store_rel_int(&thread->need_wakeup, 0); THR_SCHED_UNLOCK(curthread, thread); } kse_critical_t _kse_critical_enter(void) { kse_critical_t crit; crit = _ksd_readandclear_tmbx; return (crit); } void _kse_critical_leave(kse_critical_t crit) { struct pthread *curthread; _ksd_set_tmbx(crit); if ((crit != NULL) && ((curthread = _get_curthread()) != NULL)) THR_YIELD_CHECK(curthread); } void _thr_critical_enter(struct pthread *thread) { thread->critical_count++; } void _thr_critical_leave(struct pthread *thread) { thread->critical_count--; THR_YIELD_CHECK(thread); } /* * XXX - We may need to take the scheduling lock before calling * this, or perhaps take the lock within here before * doing anything else. */ void _thr_sched_switch(struct pthread *curthread) { struct pthread_sigframe psf; kse_critical_t crit; struct kse *curkse; volatile int once = 0; /* We're in the scheduler, 5 by 5: */ crit = _kse_critical_enter(); curkse = _get_curkse(); curthread->need_switchout = 1; /* The thread yielded on its own. */ curthread->critical_yield = 0; /* No need to yield anymore. */ curthread->slice_usec = -1; /* Restart the time slice. */ /* * The signal frame is allocated off the stack because * a thread can be interrupted by other signals while * it is running down pending signals. */ sigemptyset(&psf.psf_sigset); curthread->curframe = &psf; _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx); /* * This thread is being resumed; check for cancellations. */ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) { once = 1; thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf); } } /* * This is the entry point of the KSE upcall. */ static void kse_entry(struct kse_mailbox *mbx) { struct kse *curkse; /* The kernel should always clear this before making the upcall. */ assert(mbx->km_curthread == NULL); curkse = (struct kse *)mbx->km_udata; /* Check for first time initialization: */ if ((curkse->k_flags & KF_INITIALIZED) == 0) { /* Setup this KSEs specific data. */ _ksd_setprivate(&curkse->k_ksd); _set_curkse(curkse); /* Set this before grabbing the context. */ curkse->k_flags |= KF_INITIALIZED; } /* Avoid checking the type of KSE more than once. */ if ((curkse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) { curkse->k_mbx.km_func = (void *)kse_sched_single; kse_sched_single(curkse); } else { curkse->k_mbx.km_func = (void *)kse_sched_multi; kse_sched_multi(curkse); } } /* * This is the scheduler for a KSE which runs a scope system thread. * The multi-thread KSE scheduler should also work for a single threaded * KSE, but we use a separate scheduler so that it can be fine-tuned * to be more efficient (and perhaps not need a separate stack for * the KSE, allowing it to use the thread's stack). * * XXX - This probably needs some work. 
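The critical-section primitives defined just above are used throughout this file in one fixed idiom: enter a KSE critical section (which saves and clears the mailbox's current-thread pointer), take a KSE-level lock, do the work, then release in the opposite order. A minimal sketch of that idiom, assuming the library's internal headers:

#include "thr_private.h"

/* Hypothetical helper showing the usual shape of a KSE-locked operation. */
static void
locked_operation_sketch(struct pthread *curthread)
{
	kse_critical_t crit;

	crit = _kse_critical_enter();		/* park the mailbox pointer */
	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);

	/* ... walk or modify _thread_list / _thread_gc_list here ... */

	KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
	_kse_critical_leave(crit);		/* restore it; may yield */
}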
*/ static void kse_sched_single(struct kse *curkse) { struct pthread *curthread; struct timespec ts; int level; /* This may have returned from a kse_release(). */ if (KSE_WAITING(curkse)) KSE_CLEAR_WAIT(curkse); curthread = curkse->k_curthread; if (curthread->active == 0) { if (curthread->state != PS_RUNNING) { /* Check to see if the thread has timed out. */ KSE_GET_TOD(curkse, &ts); if (thr_timedout(curthread, &ts) != 0) { curthread->timeout = 1; curthread->state = PS_RUNNING; } } } else if (curthread->need_switchout != 0) { /* * This has to do the job of kse_switchout_thread(), only * for a single threaded KSE/KSEG. */ /* This thread no longer needs to yield the CPU: */ curthread->critical_yield = 0; curthread->need_switchout = 0; /* * Lock the scheduling queue. * * There is no scheduling queue for single threaded KSEs, * but we need a lock for protection regardless. */ KSE_SCHED_LOCK(curkse, curkse->k_kseg); switch (curthread->state) { case PS_DEAD: /* Unlock the scheduling queue and exit the KSE. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_fini(curkse); /* does not return */ break; case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Only insert threads that can timeout: */ if (curthread->wakeup_time.tv_sec != -1) { /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(curkse, curthread); } break; case PS_LOCKWAIT: level = curthread->locklevel - 1; if (_LCK_BUSY(&curthread->lockusers[level])) KSE_WAITQ_INSERT(curkse, curthread); else THR_SET_STATE(curthread, PS_RUNNING); break; case PS_JOIN: case PS_MUTEX_WAIT: case PS_RUNNING: case PS_SIGSUSPEND: case PS_SIGWAIT: case PS_SUSPENDED: case PS_DEADLOCK: default: /* * These states don't timeout and don't need * to be in the waiting queue. */ break; } if (curthread->state != PS_RUNNING) curthread->active = 0; } while (curthread->state != PS_RUNNING) { kse_wait(curkse); } /* Remove the frame reference. */ curthread->curframe = NULL; /* Unlock the scheduling queue. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * Continue the thread at its current frame: */ _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); } void dump_queues(struct kse *curkse) { struct pthread *thread; DBG_MSG("Threads in waiting queue:\n"); TAILQ_FOREACH(thread, &curkse->k_kseg->kg_schedq.sq_waitq, pqe) { DBG_MSG(" thread %p, state %d, blocked %d\n", thread, thread->state, thread->blocked); } } /* * This is the scheduler for a KSE which runs multiple threads. */ static void kse_sched_multi(struct kse *curkse) { struct pthread *curthread; struct pthread_sigframe *curframe; int ret; /* This may have returned from a kse_release(). */ if (KSE_WAITING(curkse)) KSE_CLEAR_WAIT(curkse); /* Lock the scheduling lock. */ KSE_SCHED_LOCK(curkse, curkse->k_kseg); /* * If the current thread was completed in another KSE, then * it will be in the run queue. Don't mark it as being blocked. */ if (((curthread = curkse->k_curthread) != NULL) && ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) && (curthread->need_switchout == 0)) { /* * Assume the current thread is blocked; when the * completed threads are checked and if the current * thread is among the completed, the blocked flag * will be cleared. */ curthread->blocked = 1; } /* Check for any unblocked threads in the kernel. */ kse_check_completed(curkse); /* * Check for threads that have timed-out. */ kse_check_waitq(curkse); /* * Switchout the current thread, if necessary, as the last step * so that it is inserted into the run queue (if it's runnable) * _after_ any other threads that were added to it above. 
*/ if (curthread == NULL) ; /* Nothing to do here. */ else if ((curthread->need_switchout == 0) && (curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) { /* * Resume the thread and tell it to yield when * it leaves the critical region. */ curthread->critical_yield = 0; curthread->active = 1; if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0) KSE_RUNQ_REMOVE(curkse, curthread); curkse->k_curthread = curthread; curthread->kse = curkse; KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); DBG_MSG("Continuing thread %p in critical region\n", curthread); ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); if (ret != 0) PANIC("Can't resume thread in critical region\n"); } else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) kse_switchout_thread(curkse, curthread); curkse->k_curthread = NULL; /* This has to be done without the scheduling lock held. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); - - /* Check for GC: */ - if (_gc_check != 0) - thr_gc(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); dump_queues(curkse); /* Check if there are no threads ready to run: */ while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) && (curkse->k_kseg->kg_threadcount != 0)) { /* * Wait for a thread to become active or until there are * no more threads. */ kse_wait(curkse); kse_check_waitq(curkse); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_check_signals(curkse); - if (_gc_check != 0) - thr_gc(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); } /* Check for no more threads: */ if (curkse->k_kseg->kg_threadcount == 0) { /* * Normally this shouldn't return, but it will if there * are other KSEs running that create new threads that * are assigned to this KSE[G]. For instance, if a scope * system thread were to create a scope process thread * and this kse[g] is the initial kse[g], then that newly * created thread would be assigned to us (the initial * kse[g]). */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); kse_fini(curkse); KSE_SCHED_LOCK(curkse, curkse->k_kseg); curthread = KSE_RUNQ_FIRST(curkse); } THR_ASSERT(curthread != NULL, "Return from kse_wait/fini without thread."); THR_ASSERT(curthread->state != PS_DEAD, "Trying to resume dead thread!"); KSE_RUNQ_REMOVE(curkse, curthread); /* * Make the selected thread the current thread. */ curkse->k_curthread = curthread; /* * Make sure the current thread's kse points to this kse. */ curthread->kse = curkse; /* * Reset accounting. */ curthread->tmbx.tm_uticks = 0; curthread->tmbx.tm_sticks = 0; /* * Reset the time slice if this thread is running for the first * time or running again after using its full time slice allocation. */ if (curthread->slice_usec == -1) curthread->slice_usec = 0; /* Mark the thread active. */ curthread->active = 1; /* Remove the frame reference. */ curframe = curthread->curframe; curthread->curframe = NULL; /* Unlock the scheduling queue: */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); /* * The thread's current signal frame will only be NULL if it * is being resumed after being blocked in the kernel. In * this case, and if the thread needs to run down pending * signals or needs a cancellation check, we need to add a * signal frame to the thread's context. 
*/ -#if 0 +#ifdef NOT_YET if ((curframe == NULL) && ((curthread->check_pending != 0) || (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) { signalcontext(&curthread->tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); } #endif /* * Continue the thread at its current frame: */ DBG_MSG("Continuing thread %p\n", curthread); ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); if (ret != 0) PANIC("Thread has returned from _thread_switch"); /* This point should not be reached. */ PANIC("Thread has returned from _thread_switch"); } static void kse_check_signals(struct kse *curkse) { sigset_t sigset; int i; /* Deliver posted signals. */ for (i = 0; i < _SIG_WORDS; i++) { atomic_swap_int(&curkse->k_mbx.km_sigscaught.__bits[i], 0, &sigset.__bits[i]); } if (SIGNOTEMPTY(sigset)) { /* * Dispatch each signal. * * XXX - There is no siginfo for any of these. * I think there should be, especially for * signals from other processes (si_pid, si_uid). */ for (i = 1; i < NSIG; i++) { if (sigismember(&sigset, i) != 0) { DBG_MSG("Dispatching signal %d\n", i); _thr_sig_dispatch(curkse, i, NULL /* no siginfo */); } } sigemptyset(&sigset); __sys_sigprocmask(SIG_SETMASK, &sigset, NULL); } } +#ifdef NOT_YET static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp) { struct pthread *curthread = _get_curthread(); thr_resume_check(curthread, ucp, NULL); } +#endif static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, struct pthread_sigframe *psf) { /* Check signals before cancellations. */ while (curthread->check_pending != 0) { /* Clear the pending flag. */ curthread->check_pending = 0; /* * It's perfectly valid, though not portable, for * signal handlers to munge their interrupted context * and expect to return to it. Ensure we use the * correct context when running down signals. */ _thr_sig_rundown(curthread, ucp, psf); } if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) pthread_testcancel(); } /* * Clean up a thread. This must be called with the thread's KSE * scheduling lock held. The thread must be a thread from the * KSE's group. */ static void thr_cleanup(struct kse *curkse, struct pthread *thread) { struct pthread *joiner; - int free_thread = 0; if ((joiner = thread->joiner) != NULL) { thread->joiner = NULL; if ((joiner->state == PS_JOIN) && (joiner->join_status.thread == thread)) { joiner->join_status.thread = NULL; /* Set the return status for the joining thread: */ joiner->join_status.ret = thread->ret; /* Make the thread runnable. */ if (joiner->kseg == curkse->k_kseg) _thr_setrunnable_unlocked(joiner); else { KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); KSE_SCHED_LOCK(curkse, joiner->kseg); _thr_setrunnable_unlocked(joiner); KSE_SCHED_UNLOCK(curkse, joiner->kseg); KSE_SCHED_LOCK(curkse, curkse->k_kseg); } } thread->attr.flags |= PTHREAD_DETACHED; } + if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) { + /* + * Remove the thread from the KSEG's list of threads. + */ + KSEG_THRQ_REMOVE(thread->kseg, thread); + /* + * Migrate the thread to the main KSE so that this + * KSE and KSEG can be cleaned when their last thread + * exits. + */ + thread->kseg = _kse_initial->k_kseg; + thread->kse = _kse_initial; + } thread->flags |= THR_FLAGS_GC_SAFE; - thread->kseg->kg_threadcount--; + + /* + * We can't hold the thread list lock while holding the + * scheduler lock. 
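THR_GCLIST_ADD() and THR_GCLIST_REMOVE(), used here and in _thr_gc() below, are defined in thr_private.h, only part of which appears in this change. A plausible reconstruction, going by the gcle linkage and the _gc_count bookkeeping visible in this diff; this is a guess at their shape, not the header's actual text:

/*
 * Plausible shape only; the real definitions live in thr_private.h and
 * may differ (for instance they probably guard against double insertion).
 */
#define	THR_GCLIST_ADD(thrd) do {					\
	TAILQ_INSERT_HEAD(&_thread_gc_list, (thrd), gcle);		\
	_gc_count++;							\
} while (0)

#define	THR_GCLIST_REMOVE(thrd) do {					\
	TAILQ_REMOVE(&_thread_gc_list, (thrd), gcle);			\
	_gc_count--;							\
} while (0)

/* GC_NEEDED(), used by _thr_alloc() below, presumably tests _gc_count. */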
+ */ + KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); + DBG_MSG("Adding thread %p to GC list\n", thread); KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); - _thr_stack_free(&thread->attr); - if ((thread->attr.flags & PTHREAD_DETACHED) != 0) { - /* Remove this thread from the list of all threads: */ - THR_LIST_REMOVE(thread); - if (thread->refcount == 0) { - THR_GCLIST_REMOVE(thread); - TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle); - free_thread = 1; - } - } + THR_GCLIST_ADD(thread); KSE_LOCK_RELEASE(curkse, &_thread_list_lock); - if (free_thread != 0) - _thr_free(curkse, thread); + KSE_SCHED_LOCK(curkse, curkse->k_kseg); } void -thr_gc(struct pthread *curthread) +_thr_gc(struct pthread *curthread) { - struct pthread *td, *joiner; - struct kse_group *free_kseg; + struct pthread *td, *td_next; + kse_critical_t crit; + int clean; - _gc_check = 0; - KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); - while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); + + /* Check the threads waiting for GC. */ + for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) { + td_next = TAILQ_NEXT(td, gcle); + if ((td->flags & THR_FLAGS_GC_SAFE) == 0) + continue; +#ifdef NOT_YET + else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) && + (td->kse->k_mbx.km_flags == 0)) { + /* + * The thread and KSE are operating on the same + * stack. Wait for the KSE to exit before freeing + * the thread's stack as well as everything else. + */ + continue; + } +#endif THR_GCLIST_REMOVE(td); - clean = (td->attr.flags & PTHREAD_DETACHED) != 0; - KSE_LOCK_RELEASE(curkse, &_thread_list_lock); + clean = ((td->attr.flags & PTHREAD_DETACHED) != 0) && + (td->refcount == 0); + _thr_stack_free(&td->attr); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + DBG_MSG("Found thread %p in GC list, clean? %d\n", td, clean); - KSE_SCHED_LOCK(curkse, td->kseg); - TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle); - if (TAILQ_EMPTY(&td->kseg->kg_threadq)) - free_kseg = td->kseg; - else - free_kseg = NULL; - joiner = NULL; - if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) && - (td->joiner->join_status.thread == td)) { - joiner = td->joiner; - joiner->join_status.thread = NULL; - - /* Set the return status for the joining thread: */ - joiner->join_status.ret = td->ret; - - /* Make the thread runnable. */ - if (td->kseg == joiner->kseg) { - _thr_setrunnable_unlocked(joiner); - joiner = NULL; - } + if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) { + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + kse_free_unlocked(td->kse); + kseg_free(td->kseg); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); } - td->joiner = NULL; - KSE_SCHED_UNLOCK(curkse, td->kseg); - if (free_kseg != NULL) - kseg_free(free_kseg); - if (joiner != NULL) { - KSE_SCHED_LOCK(curkse, joiner->kseg); - _thr_setrunnable_unlocked(joiner); - KSE_SCHED_LOCK(curkse, joiner->kseg); + if (clean != 0) { + _kse_critical_leave(crit); + _thr_free(curthread, td); + crit = _kse_critical_enter(); } - _thr_free(curkse, td); - KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); } - KSE_LOCK_RELEASE(curkse, &_thread_list_lock); + KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); + _kse_critical_leave(crit); } /* * Only new threads that are running or suspended may be scheduled. 
*/ void _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) { struct kse *curkse; kse_critical_t crit; int need_start; /* * If this is the first time creating a thread, make sure * the mailbox is set for the current thread. */ if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) { /* * No need to lock the scheduling queue since the * KSE/KSEG pair have not yet been started. */ KSEG_THRQ_ADD(newthread->kseg, newthread); if (newthread->state == PS_RUNNING) THR_RUNQ_INSERT_TAIL(newthread); newthread->kseg->kg_threadcount++; /* * This thread needs a new KSE and KSEG. */ crit = _kse_critical_enter(); curkse = _get_curkse(); _ksd_setprivate(&newthread->kse->k_ksd); kse_create(&newthread->kse->k_mbx, 1); _ksd_setprivate(&curkse->k_ksd); _kse_critical_leave(crit); } else { /* * Lock the KSE and add the new thread to its list of * assigned threads. If the new thread is runnable, also * add it to the KSE's run queue. */ need_start = 0; KSE_SCHED_LOCK(curthread->kse, newthread->kseg); KSEG_THRQ_ADD(newthread->kseg, newthread); if (newthread->state == PS_RUNNING) THR_RUNQ_INSERT_TAIL(newthread); newthread->kseg->kg_threadcount++; if ((newthread->kse->k_flags & KF_STARTED) == 0) { /* * This KSE hasn't been started yet. Start it * outside of holding the lock. */ newthread->kse->k_flags |= KF_STARTED; need_start = 1; } KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg); if (need_start != 0) kse_create(&newthread->kse->k_mbx, 0); else if ((newthread->state == PS_RUNNING) && KSE_WAITING(newthread->kse)) { /* * The thread is being scheduled on another KSEG. */ KSE_WAKEUP(newthread->kse); } } } void kse_waitq_insert(struct pthread *thread) { struct pthread *td; if (thread->wakeup_time.tv_sec == -1) TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread, pqe); else { td = TAILQ_FIRST(&thread->kse->k_schedq->sq_waitq); while ((td != NULL) && (td->wakeup_time.tv_sec != -1) && ((td->wakeup_time.tv_sec < thread->wakeup_time.tv_sec) || ((td->wakeup_time.tv_sec == thread->wakeup_time.tv_sec) && (td->wakeup_time.tv_nsec <= thread->wakeup_time.tv_nsec)))) td = TAILQ_NEXT(td, pqe); if (td == NULL) TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread, pqe); else TAILQ_INSERT_BEFORE(td, thread, pqe); } thread->flags |= THR_FLAGS_IN_WAITQ; } /* * This must be called with the scheduling lock held. */ static void kse_check_completed(struct kse *kse) { struct pthread *thread; struct kse_thr_mailbox *completed; if ((completed = kse->k_mbx.km_completed) != NULL) { kse->k_mbx.km_completed = NULL; while (completed != NULL) { thread = completed->tm_udata; DBG_MSG("Found completed thread %p, name %s\n", thread, (thread->name == NULL) ? "none" : thread->name); thread->blocked = 0; if (thread != kse->k_curthread) KSE_RUNQ_INSERT_TAIL(kse, thread); completed = completed->tm_next; } } } /* * This must be called with the scheduling lock held. */ static void kse_check_waitq(struct kse *kse) { struct pthread *pthread; struct timespec ts; KSE_GET_TOD(kse, &ts); /* * Wake up threads that have timedout. This has to be * done before adding the current thread to the run queue * so that a CPU intensive thread doesn't get preference * over waiting threads. 
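The wait queue ties several routines in this file together: _thr_set_timeout() (further down) records the wakeup time, kse_waitq_insert() (above) keeps the queue sorted by it, and the loop below makes threads runnable again once thr_timedout() says the deadline has passed. A hypothetical blocking primitive might combine them as sketched here; the real sleep and condition-variable paths are not part of this diff, so the exact sequence is an assumption:

#include "thr_private.h"

/* Hypothetical blocking primitive; a sketch, not library code. */
static void
sleep_for_sketch(struct pthread *curthread, const struct timespec *rel)
{
	/* Record the wakeup time (current time of day plus *rel). */
	_thr_set_timeout(rel);

	/*
	 * Park the thread.  kse_switchout_thread() puts PS_SLEEP_WAIT
	 * threads on the wait queue (sorted by kse_waitq_insert()), and
	 * kse_check_waitq() marks them runnable once thr_timedout()
	 * says the deadline has passed.
	 */
	THR_SCHED_LOCK(curthread, curthread);
	THR_SET_STATE(curthread, PS_SLEEP_WAIT);
	THR_SCHED_UNLOCK(curthread, curthread);
	_thr_sched_switch(curthread);

	/* On return, curthread->timeout != 0 means the deadline passed. */
}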
*/ while (((pthread = KSE_WAITQ_FIRST(kse)) != NULL) && thr_timedout(pthread, &ts)) { /* Remove the thread from the wait queue: */ KSE_WAITQ_REMOVE(kse, pthread); DBG_MSG("Found timedout thread %p in waitq\n", pthread); /* Indicate the thread timedout: */ pthread->timeout = 1; /* Add the thread to the priority queue: */ THR_SET_STATE(pthread, PS_RUNNING); KSE_RUNQ_INSERT_TAIL(kse, pthread); } } static int thr_timedout(struct pthread *thread, struct timespec *curtime) { if (thread->wakeup_time.tv_sec < 0) return (0); else if (thread->wakeup_time.tv_sec > curtime->tv_sec) return (0); else if ((thread->wakeup_time.tv_sec == curtime->tv_sec) && (thread->wakeup_time.tv_nsec > curtime->tv_nsec)) return (0); else return (1); } /* * This must be called with the scheduling lock held. * * Each thread has a time slice, a wakeup time (used when it wants * to wait for a specified amount of time), a run state, and an * active flag. * * When a thread gets run by the scheduler, the active flag is * set to non-zero (1). When a thread performs an explicit yield * or schedules a state change, it enters the scheduler and the * active flag is cleared. When the active flag is still seen * set in the scheduler, that means that the thread is blocked in * the kernel (because it is cleared before entering the scheduler * in all other instances). * * The wakeup time is only set for those states that can timeout. * It is set to (-1, -1) for all other instances. * * The thread's run state, aside from being useful when debugging, * is used to place the thread in an appropriate queue. There * are 2 basic queues: * * o run queue - queue ordered by priority for all threads * that are runnable * o waiting queue - queue sorted by wakeup time for all threads * that are not otherwise runnable (not blocked * in kernel, not waiting for locks) * * The thread's time slice is used for round-robin scheduling * (the default scheduling policy). While a SCHED_RR thread * is runnable it's time slice accumulates. When it reaches * the time slice interval, it gets reset and added to the end * of the queue of threads at its priority. When a thread no * longer becomes runnable (blocks in kernel, waits, etc), its * time slice is reset. * * The job of kse_switchout_thread() is to handle all of the above. */ static void kse_switchout_thread(struct kse *kse, struct pthread *thread) { int level; /* * Place the currently running thread into the * appropriate queue(s). */ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state); if (thread->blocked != 0) { /* This thread must have blocked in the kernel. */ /* thread->slice_usec = -1;*/ /* restart timeslice */ /* * XXX - Check for pending signals for this thread to * see if we need to interrupt it in the kernel. */ /* if (thread->check_pending != 0) */ if ((thread->slice_usec != -1) && (thread->attr.sched_policy != SCHED_FIFO)) thread->slice_usec += (thread->tmbx.tm_uticks + thread->tmbx.tm_sticks) * _clock_res_usec; } else { switch (thread->state) { case PS_DEAD: /* * The scheduler is operating on a different * stack. It is safe to do garbage collecting * here. */ thr_cleanup(kse, thread); return; break; case PS_RUNNING: /* Nothing to do here. */ break; case PS_COND_WAIT: case PS_SLEEP_WAIT: /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(kse, thread); break; case PS_LOCKWAIT: /* * This state doesn't timeout. 
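The round-robin bookkeeping described in the large block comment above, and implemented a few lines below, condenses to the following check. This is a restatement for clarity rather than a separate implementation; TIMESLICE_USEC and the tick fields come from the library's headers:

#include "thr_private.h"

/* Restatement of the SCHED_RR accounting done below in kse_switchout_thread(). */
static int
slice_expired_sketch(struct pthread *thread)
{
	if (thread->attr.sched_policy == SCHED_FIFO)
		return (0);		/* FIFO threads keep the CPU */

	/* Charge the user and system ticks reported in the mailbox. */
	thread->slice_usec += (thread->tmbx.tm_uticks +
	    thread->tmbx.tm_sticks) * _clock_res_usec;

	/* Past the quantum: caller resets the slice and requeues at the tail. */
	return (thread->slice_usec > TIMESLICE_USEC);
}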
*/ thread->wakeup_time.tv_sec = -1; thread->wakeup_time.tv_nsec = -1; level = thread->locklevel - 1; if (_LCK_BUSY(&thread->lockusers[level])) KSE_WAITQ_INSERT(kse, thread); else THR_SET_STATE(thread, PS_RUNNING); break; case PS_JOIN: case PS_MUTEX_WAIT: case PS_SIGSUSPEND: case PS_SIGWAIT: case PS_SUSPENDED: case PS_DEADLOCK: default: /* * These states don't timeout. */ thread->wakeup_time.tv_sec = -1; thread->wakeup_time.tv_nsec = -1; /* Insert into the waiting queue: */ KSE_WAITQ_INSERT(kse, thread); break; } if (thread->state != PS_RUNNING) { /* Restart the time slice: */ thread->slice_usec = -1; } else { if (thread->need_switchout != 0) /* * The thread yielded on its own; * restart the timeslice. */ thread->slice_usec = -1; else if ((thread->slice_usec != -1) && (thread->attr.sched_policy != SCHED_FIFO)) { thread->slice_usec += (thread->tmbx.tm_uticks + thread->tmbx.tm_sticks) * _clock_res_usec; /* Check for time quantum exceeded: */ if (thread->slice_usec > TIMESLICE_USEC) thread->slice_usec = -1; } if (thread->slice_usec == -1) { /* * The thread exceeded its time quantum or * it yielded the CPU; place it at the tail * of the queue for its priority. */ KSE_RUNQ_INSERT_TAIL(kse, thread); } else { /* * The thread hasn't exceeded its interval * Place it at the head of the queue for its * priority. */ KSE_RUNQ_INSERT_HEAD(kse, thread); } } } thread->active = 0; thread->need_switchout = 0; } /* * This function waits for the smallest timeout value of any waiting * thread, or until it receives a message from another KSE. * * This must be called with the scheduling lock held. */ static void kse_wait(struct kse *kse) { struct timespec *ts, ts_sleep; struct pthread *td_wait, *td_run; ts = &kse->k_mbx.km_timeofday; KSE_SET_WAIT(kse); td_wait = KSE_WAITQ_FIRST(kse); td_run = KSE_RUNQ_FIRST(kse); KSE_SCHED_UNLOCK(kse, kse->k_kseg); if (td_run == NULL) { if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) { /* Limit sleep to no more than 2 minutes. */ ts_sleep.tv_sec = 120; ts_sleep.tv_nsec = 0; } else { TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, ts); if (ts_sleep.tv_sec > 120) { ts_sleep.tv_sec = 120; ts_sleep.tv_nsec = 0; } } if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) { /* Don't sleep for negative times. */ kse_release(&ts_sleep); /* * The above never returns. * XXX - Actually, it would be nice if it did * for KSE's with only one thread. */ } } KSE_CLEAR_WAIT(kse); } /* * Avoid calling this kse_exit() so as not to confuse it with the * system call of the same name. */ static void kse_fini(struct kse *kse) { struct timespec ts; + struct kse_group *free_kseg = NULL; + if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) + kse_exit(); /* - * Check to see if this is the main kse. + * Check to see if this is one of the main kses. */ - if (kse == _kse_initial) { + else if (kse->k_kseg != _kse_initial->k_kseg) { + /* Remove this KSE from the KSEG's list of KSEs. */ + KSE_SCHED_LOCK(kse, kse->k_kseg); + TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe); + if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq)) + free_kseg = kse->k_kseg; + KSE_SCHED_UNLOCK(kse, kse->k_kseg); + /* + * Add this KSE to the list of free KSEs along with + * the KSEG if is now orphaned. + */ + KSE_LOCK_ACQUIRE(kse, &kse_lock); + if (free_kseg != NULL) + kseg_free(free_kseg); + kse_free_unlocked(kse); + KSE_LOCK_RELEASE(kse, &kse_lock); + kse_exit(); + /* Never returns. 
*/ + } else { + /* * Wait for the last KSE/thread to exit, or for more * threads to be created (it is possible for additional * scope process threads to be created after the main * thread exits). */ ts.tv_sec = 120; ts.tv_nsec = 0; KSE_SET_WAIT(kse); KSE_SCHED_LOCK(kse, kse->k_kseg); if ((active_kse_count > 1) && (kse->k_kseg->kg_threadcount == 0)) { KSE_SCHED_UNLOCK(kse, kse->k_kseg); /* * XXX - We need a way for the KSE to do a timed * wait. */ kse_release(&ts); /* The above never returns. */ } KSE_SCHED_UNLOCK(kse, kse->k_kseg); /* There are no more threads; exit this process: */ if (kse->k_kseg->kg_threadcount == 0) { /* kse_exit(); */ __isthreaded = 0; exit(0); } - } else { - /* Mark this KSE for GC: */ - KSE_LOCK_ACQUIRE(kse, &_thread_list_lock); - TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe); - KSE_LOCK_RELEASE(kse, &_thread_list_lock); - kse_exit(); } } void _thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp) { struct kse *curkse; curkse = _get_curkse(); KSE_SCHED_LOCK(curkse, thread->kseg); /* * A threads assigned KSE can't change out from under us * when we hold the scheduler lock. */ if (THR_IS_ACTIVE(thread)) { /* Thread is active. Can't install the signal for it. */ /* Make a note in the thread that it has a signal. */ sigaddset(&thread->sigpend, sig); thread->check_pending = 1; } else { /* Make a note in the thread that it has a signal. */ sigaddset(&thread->sigpend, sig); thread->check_pending = 1; if (thread->blocked != 0) { /* Tell the kernel to interrupt the thread. */ kse_thr_interrupt(&thread->tmbx); } } KSE_SCHED_UNLOCK(curkse, thread->kseg); } void _thr_set_timeout(const struct timespec *timeout) { struct pthread *curthread = _get_curthread(); struct timespec ts; /* Reset the timeout flag for the running thread: */ curthread->timeout = 0; /* Check if the thread is to wait forever: */ if (timeout == NULL) { /* * Set the wakeup time to something that can be recognised as * different to an actual time of day: */ curthread->wakeup_time.tv_sec = -1; curthread->wakeup_time.tv_nsec = -1; } /* Check if no waiting is required: */ else if ((timeout->tv_sec == 0) && (timeout->tv_nsec == 0)) { /* Set the wake up time to 'immediately': */ curthread->wakeup_time.tv_sec = 0; curthread->wakeup_time.tv_nsec = 0; } else { /* Calculate the time for the current thread to wakeup: */ KSE_GET_TOD(curthread->kse, &ts); TIMESPEC_ADD(&curthread->wakeup_time, &ts, timeout); } } void _thr_panic_exit(char *file, int line, char *msg) { char buf[256]; snprintf(buf, sizeof(buf), "(%s:%d) %s\n", file, line, msg); __sys_write(2, buf, strlen(buf)); abort(); } void _thr_setrunnable(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; crit = _kse_critical_enter(); KSE_SCHED_LOCK(curthread->kse, thread->kseg); _thr_setrunnable_unlocked(thread); KSE_SCHED_UNLOCK(curthread->kse, thread->kseg); _kse_critical_leave(crit); } void _thr_setrunnable_unlocked(struct pthread *thread) { if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) /* No silly queues for these threads. */ THR_SET_STATE(thread, PS_RUNNING); else { if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0) KSE_WAITQ_REMOVE(thread->kse, thread); THR_SET_STATE(thread, PS_RUNNING); if ((thread->blocked == 0) && (thread->flags & THR_FLAGS_IN_RUNQ) == 0) THR_RUNQ_INSERT_TAIL(thread); } /* * XXX - Threads are not yet assigned to specific KSEs; they are * assigned to the KSEG. 
So the fact that a thread's KSE is * waiting doesn't necessarily mean that it will be the KSE * that runs the thread after the lock is granted. But we * don't know if the other KSEs within the same KSEG are * also in a waiting state or not so we err on the side of * caution and wakeup the thread's last known KSE. We * ensure that the threads KSE doesn't change while it's * scheduling lock is held so it is safe to reference it * (the KSE). If the KSE wakes up and doesn't find any more * work it will again go back to waiting so no harm is done. */ if (KSE_WAITING(thread->kse)) KSE_WAKEUP(thread->kse); } struct pthread * _get_curthread(void) { return (_ksd_curthread); } /* This assumes the caller has disabled upcalls. */ struct kse * _get_curkse(void) { return (_ksd_curkse); } void _set_curkse(struct kse *kse) { _ksd_setprivate(&kse->k_ksd); } /* * Allocate a new KSEG. * - * We allow the current KSE (curkse) to be NULL in the case that this + * We allow the current thread to be NULL in the case that this * is the first time a KSEG is being created (library initialization). * In this case, we don't need to (and can't) take any locks. */ struct kse_group * -_kseg_alloc(struct kse *curkse) +_kseg_alloc(struct pthread *curthread) { struct kse_group *kseg = NULL; + kse_critical_t crit; - if ((curkse != NULL) && (free_kseg_count > 0)) { + if ((curthread != NULL) && (free_kseg_count > 0)) { /* Use the kse lock for the kseg queue. */ - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) { TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe); free_kseg_count--; active_kseg_count++; TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe); } - KSE_LOCK_RELEASE(curkse, &kse_lock); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); } /* * If requested, attempt to allocate a new KSE group only if the * KSE allocation was successful and a KSE group wasn't found in * the free list. */ if ((kseg == NULL) && ((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) { - THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq, - THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0, - "Unable to allocate priority queue."); - kseg_init(kseg); - if (curkse != NULL) - KSE_LOCK_ACQUIRE(curkse, &kse_lock); - kseg_free(kseg); - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); + if (_pq_alloc(&kseg->kg_schedq.sq_runq, + THR_MIN_PRIORITY, THR_LAST_PRIORITY) != 0) { + free(kseg); + kseg = NULL; + } else { + kseg_init(kseg); + /* Add the KSEG to the list of active KSEGs. */ + if (curthread != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + active_kseg_count++; + TAILQ_INSERT_TAIL(&active_kse_groupq, + kseg, kg_qe); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } else { + active_kseg_count++; + TAILQ_INSERT_TAIL(&active_kse_groupq, + kseg, kg_qe); + } + } } return (kseg); } /* * This must be called with the kse lock held and when there are * no more threads that reference it. */ static void kseg_free(struct kse_group *kseg) { + TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe); TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe); kseg_init(kseg); free_kseg_count++; active_kseg_count--; } /* * Allocate a new KSE. * - * We allow the current KSE (curkse) to be NULL in the case that this + * We allow the current thread to be NULL in the case that this * is the first time a KSE is being created (library initialization). 
* In this case, we don't need to (and can't) take any locks. */ struct kse * -_kse_alloc(struct kse *curkse) +_kse_alloc(struct pthread *curthread) { struct kse *kse = NULL; + kse_critical_t crit; int need_ksd = 0; int i; - if ((curkse != NULL) && (free_kse_count > 0)) { - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + if ((curthread != NULL) && (free_kse_count > 0)) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); /* Search for a finished KSE. */ kse = TAILQ_FIRST(&free_kseq); #define KEMBX_DONE 0x01 while ((kse != NULL) && ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) { kse = TAILQ_NEXT(kse, k_qe); } #undef KEMBX_DONE if (kse != NULL) { TAILQ_REMOVE(&free_kseq, kse, k_qe); free_kse_count--; active_kse_count++; TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); } - KSE_LOCK_RELEASE(curkse, &kse_lock); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); } if ((kse == NULL) && ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) { bzero(kse, sizeof(*kse)); /* Initialize the lockusers. */ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_init(&kse->k_lockusers[i], (void *)kse); _LCK_SET_PRIVATE2(&kse->k_lockusers[i], NULL); } /* We had to malloc a kse; mark it as needing a new ID.*/ need_ksd = 1; /* * Create the KSE context. * * XXX - For now this is done here in the allocation. * In the future, we may want to have it done * outside the allocation so that scope system * threads (one thread per KSE) are not required * to have a stack for an unneeded kse upcall. */ kse->k_mbx.km_func = kse_entry; kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE); kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE; kse->k_mbx.km_udata = (void *)kse; kse->k_mbx.km_quantum = 20000; if (kse->k_mbx.km_stack.ss_size == NULL) { free(kse); kse = NULL; } } if ((kse != NULL) && (need_ksd != 0)) { /* This KSE needs initialization. */ - if (curkse != NULL) - KSE_LOCK_ACQUIRE(curkse, &kse_lock); + if (curthread != NULL) { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + } /* Initialize KSD inside of the lock. */ if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) { - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); + if (curthread != NULL) { + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } free(kse->k_mbx.km_stack.ss_sp); for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_destroy(&kse->k_lockusers[i]); } free(kse); return (NULL); } kse->k_flags = 0; active_kse_count++; TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); - if (curkse != NULL) - KSE_LOCK_RELEASE(curkse, &kse_lock); - + if (curthread != NULL) { + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } } return (kse); } void -_kse_free(struct kse *curkse, struct kse *kse) +kse_free_unlocked(struct kse *kse) { - struct kse_group *kseg = NULL; - - if (curkse == kse) - PANIC("KSE trying to free itself"); - KSE_LOCK_ACQUIRE(curkse, &kse_lock); active_kse_count--; - if ((kseg = kse->k_kseg) != NULL) { - TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe); - /* - * Free the KSEG if there are no more threads associated - * with it. 
- */ - if (TAILQ_EMPTY(&kseg->kg_threadq)) - kseg_free(kseg); - } kse->k_kseg = NULL; kse->k_flags &= ~KF_INITIALIZED; TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe); free_kse_count++; - KSE_LOCK_RELEASE(curkse, &kse_lock); } +void +_kse_free(struct pthread *curthread, struct kse *kse) +{ + kse_critical_t crit; + + if (curthread == NULL) + kse_free_unlocked(kse); + else { + crit = _kse_critical_enter(); + KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); + kse_free_unlocked(kse); + KSE_LOCK_RELEASE(curthread->kse, &kse_lock); + _kse_critical_leave(crit); + } +} + static void kseg_init(struct kse_group *kseg) { TAILQ_INIT(&kseg->kg_kseq); TAILQ_INIT(&kseg->kg_threadq); TAILQ_INIT(&kseg->kg_schedq.sq_waitq); - TAILQ_INIT(&kseg->kg_schedq.sq_blockedq); _lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait, _kse_lock_wakeup); kseg->kg_threadcount = 0; kseg->kg_idle_kses = 0; kseg->kg_flags = 0; } struct pthread * _thr_alloc(struct pthread *curthread) { kse_critical_t crit; struct pthread *thread = NULL; if (curthread != NULL) { - if (_gc_check != 0) - thread_gc(curthread); + if (GC_NEEDED()) + _thr_gc(curthread); if (free_thread_count > 0) { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curkse, &thread_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) { TAILQ_REMOVE(&free_threadq, thread, tle); free_thread_count--; } - KSE_LOCK_RELEASE(curkse, &thread_lock); + KSE_LOCK_RELEASE(curthread->kse, &thread_lock); } } if (thread == NULL) thread = (struct pthread *)malloc(sizeof(struct pthread)); return (thread); } void _thr_free(struct pthread *curthread, struct pthread *thread) { kse_critical_t crit; + DBG_MSG("Freeing thread %p\n", thread); if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) free(thread); else { crit = _kse_critical_enter(); - KSE_LOCK_ACQUIRE(curkse, &thread_lock); + KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); + THR_LIST_REMOVE(thread); TAILQ_INSERT_HEAD(&free_threadq, thread, tle); free_thread_count++; - KSE_LOCK_RELEASE(curkse, &thread_lock); + KSE_LOCK_RELEASE(curthread->kse, &thread_lock); _kse_critical_leave(crit); } } Index: head/lib/libpthread/thread/thr_priority_queue.c =================================================================== --- head/lib/libpthread/thread/thr_priority_queue.c (revision 113660) +++ head/lib/libpthread/thread/thr_priority_queue.c (revision 113661) @@ -1,265 +1,272 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "thr_private.h" /* Prototypes: */ static void pq_insert_prio_list(pq_queue_t *pq, int prio); #if defined(_PTHREADS_INVARIANTS) #define PQ_IN_SCHEDQ (THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ) #define PQ_SET_ACTIVE(pq) (pq)->pq_flags |= PQF_ACTIVE #define PQ_CLEAR_ACTIVE(pq) (pq)->pq_flags &= ~PQF_ACTIVE #define PQ_ASSERT_ACTIVE(pq, msg) do { \ if (((pq)->pq_flags & PQF_ACTIVE) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_INACTIVE(pq, msg) do { \ if (((pq)->pq_flags & PQF_ACTIVE) != 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_IN_WAITQ(thrd, msg) do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_IN_RUNQ(thrd, msg) do { \ if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0) \ PANIC(msg); \ } while (0) #define PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \ if (((thrd)->flags & PQ_IN_SCHEDQ) != 0) \ PANIC(msg); \ } while (0) #else #define PQ_SET_ACTIVE(pq) #define PQ_CLEAR_ACTIVE(pq) #define PQ_ASSERT_ACTIVE(pq, msg) #define PQ_ASSERT_INACTIVE(pq, msg) #define PQ_ASSERT_IN_WAITQ(thrd, msg) #define PQ_ASSERT_IN_RUNQ(thrd, msg) #define PQ_ASSERT_NOT_QUEUED(thrd, msg) #endif int _pq_alloc(pq_queue_t *pq, int minprio, int maxprio) { int ret = 0; int prioslots = maxprio - minprio + 1; if (pq == NULL) ret = -1; /* Create the priority queue with (maxprio - minprio + 1) slots: */ else if ((pq->pq_lists = (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) ret = -1; else { /* Remember the queue size: */ pq->pq_size = prioslots; ret = _pq_init(pq); } return (ret); } +void +_pq_free(pq_queue_t *pq) +{ + if ((pq != NULL) && (pq->pq_lists != NULL)) + free(pq->pq_lists); +} + int _pq_init(pq_queue_t *pq) { int i, ret = 0; if ((pq == NULL) || (pq->pq_lists == NULL)) ret = -1; else { /* Initialize the queue for each priority slot: */ for (i = 0; i < pq->pq_size; i++) { TAILQ_INIT(&pq->pq_lists[i].pl_head); pq->pq_lists[i].pl_prio = i; pq->pq_lists[i].pl_queued = 0; } /* Initialize the priority queue: */ TAILQ_INIT(&pq->pq_queue); pq->pq_flags = 0; } return (ret); } void _pq_remove(pq_queue_t *pq, pthread_t pthread) { int prio = pthread->active_priority; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue"); /* * Remove this thread from priority list. Note that if * the priority list becomes empty, it is not removed * from the priority queue because another thread may be * added to the priority list (resulting in a needless * removal/insertion). Priority lists are only removed * from the priority queue when _pq_first is called. 
*/ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); /* This thread is now longer in the priority queue. */ pthread->flags &= ~THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } void _pq_insert_head(pq_queue_t *pq, pthread_t pthread) { int prio; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_NOT_QUEUED(pthread, "_pq_insert_head: Already in priority queue"); prio = pthread->active_priority; TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } void _pq_insert_tail(pq_queue_t *pq, pthread_t pthread) { int prio; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active"); PQ_SET_ACTIVE(pq); PQ_ASSERT_NOT_QUEUED(pthread, "_pq_insert_tail: Already in priority queue"); prio = pthread->active_priority; TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); if (pq->pq_lists[prio].pl_queued == 0) /* Insert the list into the priority queue: */ pq_insert_prio_list(pq, prio); /* Mark this thread as being in the priority queue. */ pthread->flags |= THR_FLAGS_IN_RUNQ; PQ_CLEAR_ACTIVE(pq); } pthread_t _pq_first(pq_queue_t *pq) { pq_list_t *pql; pthread_t pthread = NULL; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active"); PQ_SET_ACTIVE(pq); while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && (pthread == NULL)) { if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { /* * The priority list is empty; remove the list * from the queue. */ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); /* Mark the list as not being in the queue: */ pql->pl_queued = 0; } } PQ_CLEAR_ACTIVE(pq); return (pthread); } static void pq_insert_prio_list(pq_queue_t *pq, int prio) { pq_list_t *pql; /* * Make some assertions when debugging is enabled: */ PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active"); /* * The priority queue is in descending priority order. Start at * the beginning of the queue and find the list before which the * new list should be inserted. */ pql = TAILQ_FIRST(&pq->pq_queue); while ((pql != NULL) && (pql->pl_prio > prio)) pql = TAILQ_NEXT(pql, pl_link); /* Insert the list: */ if (pql == NULL) TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); else TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); /* Mark this list as being in the queue: */ pq->pq_lists[prio].pl_queued = 1; } Index: head/lib/libpthread/thread/thr_private.h =================================================================== --- head/lib/libpthread/thread/thr_private.h (revision 113660) +++ head/lib/libpthread/thread/thr_private.h (revision 113661) @@ -1,1149 +1,1174 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Private thread definitions for the uthread kernel. * * $FreeBSD$ */ #ifndef _THR_PRIVATE_H #define _THR_PRIVATE_H /* * Include files. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ksd.h" #include "lock.h" #include "pthread_md.h" /* * Evaluate the storage class specifier. */ #ifdef GLOBAL_PTHREAD_PRIVATE #define SCLASS #define SCLASS_PRESET(x...) = x #else #define SCLASS extern #define SCLASS_PRESET(x...) #endif /* * Kernel fatal error handler macro. */ #define PANIC(string) _thr_exit(__FILE__,__LINE__,string) /* Output debug messages like this: */ #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) #define DBG_MUTEX 0x0001 #define DBG_SIG 0x0002 #define THR_ASSERT(cond, msg) do { \ if (!(cond)) \ PANIC(msg); \ } while (0) /* * State change macro without scheduling queue change: */ #define THR_SET_STATE(thrd, newstate) do { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } while (0) /* * Define the signals to be used for scheduling. */ #define _ITIMER_SCHED_TIMER ITIMER_PROF #define _SCHED_SIGNAL SIGPROF #define TIMESPEC_ADD(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ if ((dst)->tv_nsec > 1000000000) { \ (dst)->tv_sec++; \ (dst)->tv_nsec -= 1000000000; \ } \ } while (0) #define TIMESPEC_SUB(dst, src, val) \ do { \ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ if ((dst)->tv_nsec < 0) { \ (dst)->tv_sec--; \ (dst)->tv_nsec += 1000000000; \ } \ } while (0) /* * Priority queues. * * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. */ typedef struct pq_list { TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ int pl_prio; /* the priority of this list */ int pl_queued; /* is this in the priority queue */ } pq_list_t; typedef struct pq_queue { TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ pq_list_t *pq_lists; /* array of all priority lists */ int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; } pq_queue_t; /* * Each KSEG has a scheduling queue. 
For now, threads that exist in their * own KSEG (system scope) will get a full priority queue. In the future * this can be optimized for the single thread per KSEG case. */ struct sched_queue { pq_queue_t sq_runq; TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ - TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */ }; /* Used to maintain pending and active signals: */ struct sigstatus { siginfo_t *info; /* arg 2 to signal handler */ int pending; /* Is this a pending signal? */ int blocked; /* * This signal has occured and hasn't * yet been handled; ignore subsequent * signals until the handler is done. */ int signo; }; typedef struct kse_thr_mailbox *kse_critical_t; struct kse_group; #define MAX_KSE_LOCKLEVEL 3 struct kse { struct kse_mailbox k_mbx; /* kernel kse mailbox */ /* -- location and order specific items for gdb -- */ struct pthread *k_curthread; /* current thread */ struct kse_group *k_kseg; /* parent KSEG */ struct sched_queue *k_schedq; /* scheduling queue */ /* -- end of location and order specific items -- */ - TAILQ_ENTRY(kse) k_qe; /* link entry */ + TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ + TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ struct ksd k_ksd; /* KSE specific data */ /* * Items that are only modified by the kse, or that otherwise * don't need to be locked when accessed */ struct lock k_lock; struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; int k_locklevel; sigset_t k_sigmask; struct sigstatus k_sigq[NSIG]; int k_check_sigq; long k_resched; /* scheduling signal arrived */ int k_flags; #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ int k_cpu; /* CPU ID when bound */ int k_done; /* this KSE is done */ }; /* * Each KSE group contains one or more KSEs in which threads can run. * At least for now, there is one scheduling queue per KSE group; KSEs * within the same KSE group compete for threads from the same scheduling * queue. A scope system thread has one KSE in one KSE group; the group * does not use its scheduling queue. */ struct kse_group { TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ struct sched_queue kg_schedq; /* scheduling queue */ struct lock kg_lock; int kg_threadcount; /* # of assigned threads */ int kg_idle_kses; int kg_flags; #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ }; /* + * Add/remove threads from a KSE's scheduling queue. + * For now the scheduling queue is hung off the KSEG. + */ +#define KSEG_THRQ_ADD(kseg, thr) \ +do { \ + TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ + (kseg)->kg_threadcount++; \ +} while (0) + +#define KSEG_THRQ_REMOVE(kseg, thr) \ +do { \ + TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ + (kseg)->kg_threadcount--; \ +} while (0) + + +/* * Lock acquire and release for KSEs. */ #define KSE_LOCK_ACQUIRE(kse, lck) \ do { \ if ((kse)->k_locklevel >= MAX_KSE_LOCKLEVEL) \ PANIC("Exceeded maximum lock level"); \ else { \ (kse)->k_locklevel++; \ _lock_acquire((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ } \ } while (0) #define KSE_LOCK_RELEASE(kse, lck) \ do { \ if ((kse)->k_locklevel > 0) { \ _lock_release((lck), \ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ (kse)->k_locklevel--; \ } \ } while (0) /* * Lock our own KSEG. 
*/ #define KSE_LOCK(curkse) \ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) #define KSE_UNLOCK(curkse) \ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) /* * Lock a potentially different KSEG. */ #define KSE_SCHED_LOCK(curkse, kseg) \ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) #define KSE_SCHED_UNLOCK(curkse, kseg) \ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) /* * Waiting queue manipulation macros (using pqe link): */ #define KSE_WAITQ_REMOVE(kse, thrd) \ do { \ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ } \ } while (0) #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) #define KSE_SET_WAIT(kse) \ atomic_store_rel_int(&(kse)->k_mbx.km_flags, 1) #define KSE_CLEAR_WAIT(kse) \ atomic_set_acq_int(&(kse)->k_mbx.km_flags, 0) #define KSE_WAITING(kse) (kse)->k_mbx.km_flags != 0 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) /* * TailQ initialization values. */ #define TAILQ_INITIALIZER { NULL, NULL } /* * lock initialization values. */ #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } struct pthread_mutex { /* * Lock for accesses to this structure. */ struct lock m_lock; enum pthread_mutextype m_type; int m_protocol; TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; long m_flags; int m_count; int m_refcount; /* * Used for priority inheritence and protection. * * m_prio - For priority inheritence, the highest active * priority (threads locking the mutex inherit * this priority). For priority protection, the * ceiling priority of this mutex. * m_saved_prio - mutex owners inherited priority before * taking the mutex, restored when the owner * unlocks the mutex. */ int m_prio; int m_saved_prio; /* * Link for list of all mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_qe; }; /* * Flags for mutexes. */ #define MUTEX_FLAGS_PRIVATE 0x01 #define MUTEX_FLAGS_INITED 0x02 #define MUTEX_FLAGS_BUSY 0x04 /* * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; long m_flags; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } /* * Condition variable definitions. */ enum pthread_cond_type { COND_TYPE_FAST, COND_TYPE_MAX }; struct pthread_cond { /* * Lock for accesses to this structure. */ struct lock c_lock; enum pthread_cond_type c_type; TAILQ_HEAD(cond_head, pthread) c_queue; struct pthread_mutex *c_mutex; long c_flags; long c_seqno; }; struct pthread_cond_attr { enum pthread_cond_type c_type; long c_flags; }; /* * Flags for condition variables. */ #define COND_FLAGS_PRIVATE 0x01 #define COND_FLAGS_INITED 0x02 #define COND_FLAGS_BUSY 0x04 /* * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ NULL, NULL, 0, 0 } /* * Semaphore definitions. */ struct sem { #define SEM_MAGIC ((u_int32_t) 0x09fa4012) u_int32_t magic; pthread_mutex_t lock; pthread_cond_t gtzero; u_int32_t count; u_int32_t nwaiters; }; /* * Cleanup definitions. 
*/ struct pthread_cleanup { struct pthread_cleanup *next; void (*routine) (); void *routine_arg; }; struct pthread_attr { int sched_policy; int sched_inherit; int sched_interval; int prio; int suspend; #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *arg_attr; void (*cleanup_attr) (); void *stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; }; /* * Thread creation state attributes. */ #define THR_CREATE_RUNNING 0 #define THR_CREATE_SUSPENDED 1 /* * Miscellaneous definitions. */ #define THR_STACK_DEFAULT 65536 /* * Maximum size of initial thread's stack. This perhaps deserves to be larger * than the stacks of other threads, since many applications are likely to run * almost entirely on this stack. */ #define THR_STACK_INITIAL 0x100000 /* * Define the different priority ranges. All applications have thread * priorities constrained within 0-31. The threads library raises the * priority when delivering signals in order to ensure that signal * delivery happens (from the POSIX spec) "as soon as possible". * In the future, the threads library will also be able to map specific * threads into real-time (cooperating) processes or kernel threads. * The RT and SIGNAL priorities will be used internally and added to * thread base priorities so that the scheduling queue can handle both * normal and RT priority threads with and without signal handling. * * The approach taken is that, within each class, signal delivery * always has priority over thread execution. */ #define THR_DEFAULT_PRIORITY 15 #define THR_MIN_PRIORITY 0 #define THR_MAX_PRIORITY 31 /* 0x1F */ #define THR_SIGNAL_PRIORITY 32 /* 0x20 */ #define THR_RT_PRIORITY 64 /* 0x40 */ #define THR_FIRST_PRIORITY THR_MIN_PRIORITY #define THR_LAST_PRIORITY \ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) /* * Clock resolution in microseconds. */ #define CLOCK_RES_USEC 10000 /* * Time slice period in microseconds. */ #define TIMESLICE_USEC 20000 /* * XXX - Define a thread-safe macro to get the current time of day * which is updated at regular intervals by something. * * For now, we just make the system call to get the time. */ #define KSE_GET_TOD(curkse, tsp) \ do { \ *tsp = (curkse)->k_mbx.km_timeofday; \ if ((tsp)->tv_sec == 0) \ clock_gettime(CLOCK_REALTIME, tsp); \ } while (0) struct pthread_rwlockattr { int pshared; }; struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; int blocked_writers; }; /* * Thread states. */ enum pthread_state { PS_RUNNING, PS_LOCKWAIT, PS_MUTEX_WAIT, PS_COND_WAIT, PS_SLEEP_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, PS_JOIN, PS_SUSPENDED, PS_DEAD, PS_DEADLOCK, PS_STATE_MAX }; union pthread_wait_data { pthread_mutex_t mutex; pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct lock *lock; }; /* * Define a continuation routine that can be used to perform a * transfer of control: */ typedef void (*thread_continuation_t) (void *); /* * This stores a thread's state prior to running a signal handler. * It is used when a signal is delivered to a thread blocked in * userland. If the signal handler returns normally, the thread's * state is restored from here. 
*/ struct pthread_sigframe { int psf_flags; int psf_interrupted; int psf_signo; enum pthread_state psf_state; union pthread_wait_data psf_wait_data; struct timespec psf_wakeup_time; sigset_t psf_sigset; sigset_t psf_sigmask; int psf_seqno; }; struct join_status { struct pthread *thread; void *ret; int error; }; struct pthread_specific_elem { const void *data; int seqno; }; #define MAX_THR_LOCKLEVEL 3 /* * Thread structure. */ struct pthread { /* * Magic value to help recognize a valid thread structure * from an invalid one: */ #define THR_MAGIC ((u_int32_t) 0xd09ba115) u_int32_t magic; char *name; u_int64_t uniqueid; /* for gdb */ /* Queue entry for list of all threads: */ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ /* Queue entry for GC lists: */ TAILQ_ENTRY(pthread) gcle; /* * Lock for accesses to this thread structure. */ struct lock lock; struct lockuser lockusers[MAX_THR_LOCKLEVEL]; int locklevel; kse_critical_t critical[MAX_KSE_LOCKLEVEL]; struct kse *kse; struct kse_group *kseg; /* * Thread start routine, argument, stack pointer and thread * attributes. */ void *(*start_routine)(void *); void *arg; struct pthread_attr attr; /* * Thread mailbox. */ struct kse_thr_mailbox tmbx; int active; /* thread running */ int blocked; /* thread blocked in kernel */ int need_switchout; int need_wakeup; /* * Used for tracking delivery of signal handlers. */ struct pthread_sigframe *curframe; siginfo_t siginfo[NSIG]; /* * Cancelability flags - the lower 2 bits are used by cancel * definitions in pthread.h */ #define THR_AT_CANCEL_POINT 0x0004 #define THR_CANCELLING 0x0008 #define THR_CANCEL_NEEDED 0x0010 int cancelflags; thread_continuation_t continuation; /* * The thread's base and pending signal masks. The active * signal mask is stored in the thread's context (in mailbox). */ sigset_t sigmask; sigset_t sigpend; int sigmask_seqno; int check_pending; int refcount; /* Thread state: */ enum pthread_state state; /* * Number of microseconds accumulated by this thread when * time slicing is active. */ long slice_usec; /* * Time to wake up thread. This is used for sleeping threads and * for any operation which may time out (such as select). */ struct timespec wakeup_time; /* TRUE if operation has timed out. */ int timeout; /* * Error variable used instead of errno. The function __error() * returns a pointer to this. */ int error; /* * The joiner is the thread that is joining to this thread. The * join status keeps track of a join operation to another thread. */ struct pthread *joiner; struct join_status join_status; /* * The current thread can belong to only one scheduling queue at * a time (ready or waiting queue). It can also belong to: * * o A queue of threads waiting for a mutex * o A queue of threads waiting for a condition variable * * It is possible for a thread to belong to more than one of the * above queues if it is handling a signal. A thread may only * enter a mutex or condition variable queue when it is not * being called from a signal handler. If a thread is a member * of one of these queues when a signal handler is invoked, it * must be removed from the queue before invoking the handler * and then added back to the queue after return from the handler. * * Use pqe for the scheduling queue link (both ready and waiting), * sqe for synchronization (mutex, condition variable, and join) * queue links, and qe for all other links. 
*/ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ /* Wait data. */ union pthread_wait_data data; /* * Set to TRUE if a blocking operation was * interrupted by a signal: */ int interrupted; /* Signal number when in state PS_SIGWAIT: */ int signo; /* * Set to non-zero when this thread has entered a critical * region. We allow for recursive entries into critical regions. */ int critical_count; /* * Set to TRUE if this thread should yield after leaving a * critical region to check for signals, messages, etc. */ int critical_yield; int sflags; #define THR_FLAGS_IN_SYNCQ 0x0001 /* Miscellaneous flags; only set with scheduling lock held. */ int flags; #define THR_FLAGS_PRIVATE 0x0001 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ #define THR_FLAGS_GC_SAFE 0x0020 /* thread safe for cleaning */ #define THR_FLAGS_IN_TDLIST 0x0040 /* thread in all thread list */ #define THR_FLAGS_IN_GCLIST 0x0080 /* thread in gc list */ /* * Base priority is the user setable and retrievable priority * of the thread. It is only affected by explicit calls to * set thread priority and upon thread creation via a thread * attribute or default priority. */ char base_priority; /* * Inherited priority is the priority a thread inherits by * taking a priority inheritence or protection mutex. It * is not affected by base priority changes. Inherited * priority defaults to and remains 0 until a mutex is taken * that is being waited on by any other thread whose priority * is non-zero. */ char inherited_priority; /* * Active priority is always the maximum of the threads base * priority and inherited priority. When there is a change * in either the base or inherited priority, the active * priority must be recalculated. */ char active_priority; /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; /* * Queue of currently owned mutexes. */ TAILQ_HEAD(, pthread_mutex) mutexq; void *ret; struct pthread_specific_elem *specific; int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; char *fname; /* Ptr to source file name */ int lineno; /* Source line number. */ }; /* * Critical regions can also be detected by looking at the threads * current lock level. Ensure these macros increment and decrement * the lock levels such that locks can not be held with a lock level * of 0. 
*/ #define THR_IN_CRITICAL(thrd) \ (((thrd)->locklevel > 0) || \ ((thrd)->critical_count > 0)) #define THR_YIELD_CHECK(thrd) \ do { \ if (((thrd)->critical_yield != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sched_switch(thrd); \ else if (((thrd)->check_pending != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sig_check_pending(thrd); \ } while (0) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \ PANIC("Exceeded maximum lock level"); \ else { \ (thrd)->locklevel++; \ _lock_acquire((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1], \ (thrd)->active_priority); \ } \ } while (0) #define THR_LOCK_RELEASE(thrd, lck) \ do { \ if ((thrd)->locklevel > 0) { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ if ((thrd)->locklevel != 0) \ ; \ else if ((thrd)->critical_yield != 0) \ _thr_sched_switch(thrd); \ else if ((thrd)->check_pending != 0) \ _thr_sig_check_pending(thrd); \ } \ } while (0) /* * For now, threads will have their own lock separate from their * KSE scheduling lock. */ #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) /* * Priority queue manipulation macros (using pqe link). We use * the thread's kseg link instead of the kse link because a thread * does not (currently) have a statically assigned kse. */ #define THR_RUNQ_INSERT_HEAD(thrd) \ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_INSERT_TAIL(thrd) \ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_REMOVE(thrd) \ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) #define THR_RUNQ_FIRST() \ _pq_first(&(thrd)->kseg->kg_schedq.sq_runq) /* * Macros to insert/remove threads to the all thread list and * the gc list. */ #define THR_LIST_ADD(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ (thrd)->flags |= THR_FLAGS_IN_TDLIST; \ } \ } while (0) #define THR_LIST_REMOVE(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \ TAILQ_REMOVE(&_thread_list, thrd, tle); \ (thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \ } \ } while (0) #define THR_GCLIST_ADD(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \ - TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \ + TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ (thrd)->flags |= THR_FLAGS_IN_GCLIST; \ + _gc_count++; \ } \ } while (0) #define THR_GCLIST_REMOVE(thrd) do { \ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \ - TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \ + TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ (thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \ + _gc_count--; \ } \ } while (0) +#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) + /* * Locking the scheduling queue for another thread uses that thread's * KSEG lock. 
*/ #define THR_SCHED_LOCK(curthr, thr) do { \ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ (curthr)->locklevel++; \ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ } while (0) #define THR_SCHED_UNLOCK(curthr, thr) do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ if ((curthr)->locklevel == 0) \ THR_YIELD_CHECK(curthr); \ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ #define THR_CRITICAL_LEAVE(thr) do { \ (thr)->critical_count--; \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ _thr_sched_switch(thr); \ } \ } while (0) #define THR_IS_ACTIVE(thrd) \ ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)) #define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0) /* * Global variables for the pthread kernel. */ SCLASS void *_usrstack SCLASS_PRESET(NULL); SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); /* List of all threads: */ SCLASS TAILQ_HEAD(, pthread) _thread_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); /* List of threads needing GC: */ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); /* Default thread attributes: */ SCLASS struct pthread_attr _pthread_attr_default SCLASS_PRESET({ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, THR_STACK_DEFAULT }); /* Default mutex attributes: */ SCLASS struct pthread_mutex_attr _pthread_mutexattr_default SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); /* Default condition variable attributes: */ SCLASS struct pthread_cond_attr _pthread_condattr_default SCLASS_PRESET({COND_TYPE_FAST, 0}); /* Clock resolution in usec. */ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); /* Array of signal actions for this process: */ SCLASS struct sigaction _thread_sigact[NSIG]; /* * Array of counts of dummy handlers for SIG_DFL signals. This is used to * assure that there is always a dummy signal handler installed while there * is a thread sigwait()ing on the corresponding signal. */ SCLASS int _thread_dfl_count[NSIG]; /* * Lock for above count of dummy handlers and for the process signal * mask and pending signal sets. */ SCLASS struct lock _thread_signal_lock; /* Pending signals and mask for this process: */ SCLASS sigset_t _thr_proc_sigpending; SCLASS sigset_t _thr_proc_sigmask SCLASS_PRESET({{0, 0, 0, 0}}); SCLASS siginfo_t _thr_proc_siginfo[NSIG]; SCLASS pid_t _thr_pid SCLASS_PRESET(0); /* Garbage collector lock. */ SCLASS struct lock _gc_lock; SCLASS int _gc_check SCLASS_PRESET(0); -SCLASS pthread_t _gc_thread; +SCLASS int _gc_count SCLASS_PRESET(0); SCLASS struct lock _mutex_static_lock; SCLASS struct lock _rwlock_static_lock; SCLASS struct lock _keytable_lock; SCLASS struct lock _thread_list_lock; SCLASS int _thr_guard_default; SCLASS int _thr_page_size; SCLASS int _thr_debug_flags SCLASS_PRESET(0); /* Undefine the storage class and preset specifiers: */ #undef SCLASS #undef SCLASS_PRESET /* * Function prototype definitions. 
*/ __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); void _cond_wait_backout(struct pthread *); struct pthread *_get_curthread(void); struct kse *_get_curkse(void); void _set_curkse(struct kse *); -struct kse *_kse_alloc(struct kse *); +struct kse *_kse_alloc(struct pthread *); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); -void _kse_free(struct kse *, struct kse *); +void _kse_free(struct pthread *, struct kse *); void _kse_init(); -struct kse_group *_kseg_alloc(struct kse *); +struct kse_group *_kseg_alloc(struct pthread *); void _kse_lock_wait(struct lock *, struct lockuser *lu); void _kse_lock_wakeup(struct lock *, struct lockuser *lu); void _kse_sig_check_pending(struct kse *); void _kse_single_thread(struct pthread *); void _kse_start(struct kse *); void _kse_setthreaded(int); int _kse_isthreaded(void); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); void _mutex_lock_backout(struct pthread *); void _mutex_notify_priochange(struct pthread *, struct pthread *, int); int _mutex_reinit(struct pthread_mutex *); void _mutex_unlock_private(struct pthread *); void _libpthread_init(struct pthread *); int _pq_alloc(struct pq_queue *, int, int); +void _pq_free(struct pq_queue *); int _pq_init(struct pq_queue *); void _pq_remove(struct pq_queue *pq, struct pthread *); void _pq_insert_head(struct pq_queue *pq, struct pthread *); void _pq_insert_tail(struct pq_queue *pq, struct pthread *); struct pthread *_pq_first(struct pq_queue *pq); void *_pthread_getspecific(pthread_key_t); int _pthread_key_create(pthread_key_t *, void (*) (void *)); int _pthread_key_delete(pthread_key_t); int _pthread_mutex_destroy(pthread_mutex_t *); int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); int _pthread_mutex_lock(pthread_mutex_t *); int _pthread_mutex_trylock(pthread_mutex_t *); int _pthread_mutex_unlock(pthread_mutex_t *); int _pthread_mutexattr_init(pthread_mutexattr_t *); int _pthread_mutexattr_destroy(pthread_mutexattr_t *); int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); int _pthread_once(pthread_once_t *, void (*) (void)); struct pthread *_pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); -struct pthread *_thr_alloc(struct kse *); +struct pthread *_thr_alloc(struct pthread *); +int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); +int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); void _thr_exit(char *, int, char *); void _thr_exit_cleanup(void); void _thr_lock_wait(struct lock *lock, struct lockuser *lu); void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); int _thr_ref_add(struct pthread *, struct pthread *, int); void _thr_ref_delete(struct pthread *, struct pthread *); void _thr_schedule_add(struct pthread *, struct pthread *); void _thr_schedule_remove(struct pthread *, struct pthread *); void _thr_setrunnable(struct pthread *curthread, struct pthread *thread); void _thr_setrunnable_unlocked(struct pthread *thread); void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *); void _thr_sig_dispatch(struct kse *, int, siginfo_t *); int _thr_stack_alloc(struct pthread_attr *); void _thr_stack_free(struct pthread_attr *); void _thr_exit_cleanup(void); -void _thr_free(struct kse *, struct pthread *); +void _thr_free(struct pthread *, struct pthread *); +void _thr_gc(struct pthread *); void _thr_panic_exit(char *, int, char *); void _thread_cleanupspecific(void); void _thread_dump_info(void); void _thread_printf(int, const 
char *, ...); void _thr_sched_frame(struct pthread_sigframe *); void _thr_sched_switch(struct pthread *); void _thr_set_timeout(const struct timespec *); void _thr_sig_handler(int, siginfo_t *, ucontext_t *); void _thr_sig_check_pending(struct pthread *); void _thr_sig_rundown(struct pthread *, ucontext_t *, struct pthread_sigframe *); void _thr_sig_send(struct pthread *pthread, int sig); void _thr_sig_wrapper(void); void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); void _thr_seterrno(struct pthread *, int); void _thr_enter_cancellation_point(struct pthread *); void _thr_leave_cancellation_point(struct pthread *); /* XXX - Stuff that goes away when my sources get more up to date. */ /* #include */ #ifdef SYS_KSE_H int __sys_kse_create(struct kse_mailbox *, int); int __sys_kse_thr_wakeup(struct kse_mailbox *); int __sys_kse_exit(struct kse_mailbox *); int __sys_kse_release(struct kse_mailbox *); #endif /* #include */ #ifdef _SYS_AIO_H_ int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); #endif /* #include */ #ifdef _SYS_FCNTL_H_ int __sys_fcntl(int, int, ...); int __sys_open(const char *, int, ...); #endif /* #include */ #ifdef _SYS_IOCTL_H_ int __sys_ioctl(int, unsigned long, ...); #endif /* #inclde */ #ifdef _SCHED_H_ int __sys_sched_yield(void); #endif /* #include */ #ifdef _SIGNAL_H_ int __sys_kill(pid_t, int); int __sys_sigaction(int, const struct sigaction *, struct sigaction *); int __sys_sigpending(sigset_t *); int __sys_sigprocmask(int, const sigset_t *, sigset_t *); int __sys_sigsuspend(const sigset_t *); int __sys_sigreturn(ucontext_t *); int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); #endif /* #include */ #ifdef _SYS_SOCKET_H_ int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int); #endif /* #include */ #ifdef _SYS_UIO_H_ ssize_t __sys_readv(int, const struct iovec *, int); ssize_t __sys_writev(int, const struct iovec *, int); #endif /* #include */ #ifdef _TIME_H_ int __sys_nanosleep(const struct timespec *, struct timespec *); #endif /* #include */ #ifdef _UNISTD_H_ int __sys_close(int); int __sys_execve(const char *, char * const *, char * const *); int __sys_fork(void); int __sys_fsync(int); pid_t __sys_getpid(void); int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); ssize_t __sys_read(int, void *, size_t); ssize_t __sys_write(int, const void *, size_t); void __sys_exit(int); #endif /* #include */ #ifdef _SYS_POLL_H_ int __sys_poll(struct pollfd *, unsigned, int); #endif /* #include */ #ifdef _SYS_MMAN_H_ int __sys_msync(void *, size_t, int); #endif #endif /* !_THR_PRIVATE_H */ Index: head/lib/libpthread/thread/thr_resume_np.c =================================================================== --- head/lib/libpthread/thread/thr_resume_np.c (revision 113660) +++ head/lib/libpthread/thread/thr_resume_np.c (revision 113661) @@ -1,109 +1,112 @@ /* * Copyright (c) 1995 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include "thr_private.h" static void resume_common(struct pthread *); __weak_reference(_pthread_resume_np, pthread_resume_np); __weak_reference(_pthread_resume_all_np, pthread_resume_all_np); /* Resume a thread: */ int _pthread_resume_np(pthread_t thread) { struct pthread *curthread = _get_curthread(); int ret; /* Add a reference to the thread: */ if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) { /* Is it currently suspended? */ if ((thread->flags & THR_FLAGS_SUSPENDED) != 0) { /* Lock the threads scheduling queue: */ THR_SCHED_LOCK(curthread, thread); - resume_common(thread); + if ((curthread->state != PS_DEAD) && + (curthread->state != PS_DEADLOCK) && + ((curthread->flags & THR_FLAGS_EXITING) != 0)) + resume_common(thread); /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, thread); } _thr_ref_delete(curthread, thread); } return (ret); } void _pthread_resume_all_np(void) { struct pthread *curthread = _get_curthread(); struct pthread *thread; kse_critical_t crit; /* Take the thread list lock: */ crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(thread, &_thread_list, tle) { if ((thread != curthread) && ((thread->flags & THR_FLAGS_SUSPENDED) != 0) && (thread->state != PS_DEAD) && (thread->state != PS_DEADLOCK) && ((thread->flags & THR_FLAGS_EXITING) == 0)) { THR_SCHED_LOCK(curthread, thread); resume_common(thread); THR_SCHED_UNLOCK(curthread, thread); } } /* Release the thread list lock: */ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } static void resume_common(struct pthread *thread) { /* Clear the suspend flag: */ thread->flags &= ~THR_FLAGS_SUSPENDED; /* * If the thread's state is suspended, that means it is * now runnable but not in any scheduling queue. Set the * state to running and insert it into the run queue. */ if (thread->state == PS_SUSPENDED) _thr_setrunnable_unlocked(thread); } Index: head/lib/libpthread/thread/thr_setschedparam.c =================================================================== --- head/lib/libpthread/thread/thr_setschedparam.c (revision 113660) +++ head/lib/libpthread/thread/thr_setschedparam.c (revision 113661) @@ -1,129 +1,136 @@ /* * Copyright (c) 1998 Daniel Eischen . * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include "thr_private.h" __weak_reference(_pthread_setschedparam, pthread_setschedparam); int _pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param) { struct pthread *curthread = _get_curthread(); int in_syncq; int in_readyq = 0; int old_prio; int ret = 0; if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) { /* Return an invalid argument error: */ ret = EINVAL; } else if ((param->sched_priority < THR_MIN_PRIORITY) || (param->sched_priority > THR_MAX_PRIORITY)) { /* Return an unsupported value error. */ ret = ENOTSUP; /* Find the thread in the list of active threads: */ } else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) { /* * Lock the threads scheduling queue while we change * its priority: */ THR_SCHED_LOCK(curthread, pthread); + if ((pthread->state == PS_DEAD) || + (pthread->state == PS_DEADLOCK) || + ((pthread->flags & THR_FLAGS_EXITING) != 0)) { + THR_SCHED_UNLOCK(curthread, pthread); + _thr_ref_delete(curthread, pthread); + return (ESRCH); + } in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ; /* Set the scheduling policy: */ pthread->attr.sched_policy = policy; if (param->sched_priority == THR_BASE_PRIORITY(pthread->base_priority)) /* * There is nothing to do; unlock the threads * scheduling queue. 
*/ THR_SCHED_UNLOCK(curthread, pthread); else { /* * Remove the thread from its current priority * queue before any adjustments are made to its * active priority: */ old_prio = pthread->active_priority; if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) { in_readyq = 1; THR_RUNQ_REMOVE(pthread); } /* Set the thread base priority: */ pthread->base_priority &= (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY); pthread->base_priority = param->sched_priority; /* Recalculate the active priority: */ pthread->active_priority = MAX(pthread->base_priority, pthread->inherited_priority); if (in_readyq) { if ((pthread->priority_mutex_count > 0) && (old_prio > pthread->active_priority)) { /* * POSIX states that if the priority is * being lowered, the thread must be * inserted at the head of the queue for * its priority if it owns any priority * protection or inheritence mutexes. */ THR_RUNQ_INSERT_HEAD(pthread); } else THR_RUNQ_INSERT_TAIL(pthread); } /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, pthread); /* * Check for any mutex priority adjustments. This * includes checking for a priority mutex on which * this thread is waiting. */ _mutex_notify_priochange(curthread, pthread, in_syncq); } _thr_ref_delete(curthread, pthread); } return (ret); } Index: head/lib/libpthread/thread/thr_suspend_np.c =================================================================== --- head/lib/libpthread/thread/thr_suspend_np.c (revision 113660) +++ head/lib/libpthread/thread/thr_suspend_np.c (revision 113661) @@ -1,106 +1,105 @@ /* * Copyright (c) 1995-1998 John Birrell . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by John Birrell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include "thr_private.h" static void suspend_common(struct pthread *thread); __weak_reference(_pthread_suspend_np, pthread_suspend_np); __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np); /* Suspend a thread: */ int _pthread_suspend_np(pthread_t thread) { struct pthread *curthread = _get_curthread(); int ret; /* Suspending the current thread doesn't make sense. */ if (thread == _get_curthread()) ret = EDEADLK; /* Add a reference to the thread: */ else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) { /* Lock the threads scheduling queue: */ THR_SCHED_LOCK(curthread, thread); - suspend_common(thread); - /* Unlock the threads scheduling queue: */ THR_SCHED_UNLOCK(curthread, thread); /* Don't forget to remove the reference: */ _thr_ref_delete(curthread, thread); } return (ret); } void _pthread_suspend_all_np(void) { struct pthread *curthread = _get_curthread(); struct pthread *thread; kse_critical_t crit; /* Take the thread list lock: */ crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); TAILQ_FOREACH(thread, &_thread_list, tle) { - if ((thread != curthread) && - (thread->state != PS_DEAD) && - (thread->state != PS_DEADLOCK) && - ((thread->flags & THR_FLAGS_EXITING) == 0)) { + if (thread != curthread) { THR_SCHED_LOCK(curthread, thread); suspend_common(thread); THR_SCHED_UNLOCK(curthread, thread); } } /* Release the thread list lock: */ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); _kse_critical_leave(crit); } void suspend_common(struct pthread *thread) { - thread->flags |= THR_FLAGS_SUSPENDED; - if (thread->flags & THR_FLAGS_IN_RUNQ) { - THR_RUNQ_REMOVE(thread); - THR_SET_STATE(thread, PS_SUSPENDED); + if ((thread->state != PS_DEAD) && + (thread->state != PS_DEADLOCK) && + ((thread->flags & THR_FLAGS_EXITING) == 0)) { + thread->flags |= THR_FLAGS_SUSPENDED; + if ((thread->flags & THR_FLAGS_IN_RUNQ) != 0) { + THR_RUNQ_REMOVE(thread); + THR_SET_STATE(thread, PS_SUSPENDED); + } } }
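
The _kse_alloc()/_kse_free() and _thr_alloc()/_thr_free() changes above all follow the same caching pattern: enter a critical region, take the relevant lock, try to recycle a structure from a TAILQ free list, and fall back to malloc() only on a cache miss. The standalone sketch below illustrates that pattern; every identifier in it (obj, obj_cache, enter_critical(), leave_critical()) is a hypothetical placeholder standing in for the library's own types, locks, and _kse_critical_enter()/_kse_critical_leave() calls, not the libpthread API itself.

/*
 * Illustrative sketch only: a cached allocator in the style of
 * _kse_alloc()/_kse_free() above.  All identifiers here are
 * hypothetical stand-ins, not part of libpthread.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct obj {
	TAILQ_ENTRY(obj) link;
	int initialized;
};

static TAILQ_HEAD(, obj) obj_cache = TAILQ_HEAD_INITIALIZER(obj_cache);
static int cache_count;

/* Placeholders for _kse_critical_enter()/_kse_critical_leave() plus
 * the KSE_LOCK_ACQUIRE()/KSE_LOCK_RELEASE() pair. */
static void enter_critical(void) { }
static void leave_critical(void) { }

static struct obj *
obj_alloc(void)
{
	struct obj *p = NULL;

	enter_critical();		/* block upcalls while on the list */
	if ((p = TAILQ_FIRST(&obj_cache)) != NULL) {
		TAILQ_REMOVE(&obj_cache, p, link);
		cache_count--;
	}
	leave_critical();

	if (p == NULL) {
		/* Cache miss: fall back to the heap. */
		p = malloc(sizeof(*p));
		if (p != NULL) {
			memset(p, 0, sizeof(*p));
			p->initialized = 1;
		}
	}
	return (p);
}

static void
obj_free(struct obj *p)
{
	enter_critical();
	TAILQ_INSERT_HEAD(&obj_cache, p, link);
	cache_count++;
	leave_critical();
}

Freed objects go back on the head of the list, mirroring the TAILQ_INSERT_HEAD() calls in kse_free_unlocked() and _thr_free(), so the most recently released structure is the first one recycled.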
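
For the priority queue managed by _pq_alloc()/_pq_init()/_pq_insert_*()/_pq_first(), the structure is an array with one list per priority plus a secondary queue holding only the non-empty lists in descending priority order; empty lists are pruned lazily in _pq_first() rather than on every removal, as the comment in _pq_remove() explains. The sketch below shows the same idea with simplified, hypothetical types (struct task, struct prio_list, struct prio_queue); it is an illustration of the data structure, not the library code.

/*
 * Illustrative sketch of the "queue of priority lists" idea used by
 * _pq_insert_*()/_pq_first() above, with hypothetical types.
 */
#include <stdlib.h>
#include <sys/queue.h>

struct task {
	TAILQ_ENTRY(task) q_link;
	int prio;
};

struct prio_list {
	TAILQ_HEAD(, task) head;	/* tasks at this priority */
	TAILQ_ENTRY(prio_list) link;	/* link in the active-list queue */
	int prio;
	int queued;			/* already on the active queue? */
};

struct prio_queue {
	TAILQ_HEAD(, prio_list) active;	/* non-empty lists, descending prio */
	struct prio_list *lists;	/* one slot per priority */
	int nlists;
};

static int
pq_setup(struct prio_queue *pq, int nprio)
{
	int i;

	pq->lists = calloc(nprio, sizeof(*pq->lists));
	if (pq->lists == NULL)
		return (-1);
	pq->nlists = nprio;
	TAILQ_INIT(&pq->active);
	for (i = 0; i < nprio; i++) {
		TAILQ_INIT(&pq->lists[i].head);
		pq->lists[i].prio = i;
	}
	return (0);
}

static void
pq_insert_tail(struct prio_queue *pq, struct task *t)
{
	struct prio_list *pl = &pq->lists[t->prio];
	struct prio_list *it;

	TAILQ_INSERT_TAIL(&pl->head, t, q_link);
	if (pl->queued)
		return;
	/* Keep the active queue sorted by descending priority. */
	TAILQ_FOREACH(it, &pq->active, link)
		if (it->prio < pl->prio)
			break;
	if (it == NULL)
		TAILQ_INSERT_TAIL(&pq->active, pl, link);
	else
		TAILQ_INSERT_BEFORE(it, pl, link);
	pl->queued = 1;
}

static struct task *
pq_first(struct prio_queue *pq)
{
	struct prio_list *pl;
	struct task *t = NULL;

	/* Empty lists are removed lazily, as in _pq_first(). */
	while ((pl = TAILQ_FIRST(&pq->active)) != NULL &&
	    (t = TAILQ_FIRST(&pl->head)) == NULL) {
		TAILQ_REMOVE(&pq->active, pl, link);
		pl->queued = 0;
	}
	return (t);
}

A caller obtains the highest-priority runnable task with pq_first() and only then dequeues it, which matches how _pq_first() leaves the thread on its priority list.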
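
The TIMESPEC_ADD()/TIMESPEC_SUB() macros in thr_private.h carry or borrow a full second whenever tv_nsec leaves the [0, 1000000000) range; this is the arithmetic behind wakeup_time for sleeping threads and timed waits. Below is a minimal restatement of that arithmetic as standalone functions; ts_add() and ts_sub() are illustrative names, not part of the library.

/*
 * Illustrative timespec arithmetic with nanosecond normalization,
 * in the spirit of the TIMESPEC_ADD()/TIMESPEC_SUB() macros above.
 */
#include <time.h>

static struct timespec
ts_add(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec = a.tv_sec + b.tv_sec;
	r.tv_nsec = a.tv_nsec + b.tv_nsec;
	if (r.tv_nsec >= 1000000000L) {	/* carry into seconds */
		r.tv_sec++;
		r.tv_nsec -= 1000000000L;
	}
	return (r);
}

static struct timespec
ts_sub(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec = a.tv_sec - b.tv_sec;
	r.tv_nsec = a.tv_nsec - b.tv_nsec;
	if (r.tv_nsec < 0) {		/* borrow from seconds */
		r.tv_sec--;
		r.tv_nsec += 1000000000L;
	}
	return (r);
}

For example, a wakeup time is ts_add(now, timeout), and the time remaining before it is ts_sub(wakeup, now) as long as the result is non-negative.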
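
KSE_LOCK_ACQUIRE()/KSE_LOCK_RELEASE() and THR_LOCK_ACQUIRE()/THR_LOCK_RELEASE() both track how deeply their owner is nested in locks with a small per-owner array of lockusers indexed by the current lock level, and panic when the nesting would exceed MAX_KSE_LOCKLEVEL or MAX_THR_LOCKLEVEL. A rough standalone sketch of that bookkeeping follows; struct owner, owner_lock(), and owner_unlock() are invented names, and the lock_acquire()/lock_release() bodies are empty placeholders for the real lock package.

/*
 * Illustrative sketch of per-owner lock-level bookkeeping; all names
 * here are hypothetical, not the libpthread lock package.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_LOCKLEVEL	3

struct lockuser { int dummy; };
struct lock { int dummy; };

struct owner {
	struct lockuser	lockusers[MAX_LOCKLEVEL];
	int		locklevel;	/* number of locks currently held */
};

static void lock_acquire(struct lock *l, struct lockuser *lu) { (void)l; (void)lu; }
static void lock_release(struct lock *l, struct lockuser *lu) { (void)l; (void)lu; }

static void
owner_lock(struct owner *o, struct lock *l)
{
	if (o->locklevel >= MAX_LOCKLEVEL) {
		/* Nesting deeper than the lockuser array allows is fatal. */
		fprintf(stderr, "Exceeded maximum lock level\n");
		abort();
	}
	o->locklevel++;
	lock_acquire(l, &o->lockusers[o->locklevel - 1]);
}

static void
owner_unlock(struct owner *o, struct lock *l)
{
	if (o->locklevel > 0) {
		lock_release(l, &o->lockusers[o->locklevel - 1]);
		o->locklevel--;
	}
}

Each nesting level gets its own lockuser slot, which is why the headers above cap the levels at small constants rather than allowing unbounded recursion into locks.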