diff --git a/lib/libthr/thread/thr_list.c b/lib/libthr/thread/thr_list.c
index bca2bfc75fef..bbc1d2899cb9 100644
--- a/lib/libthr/thread/thr_list.c
+++ b/lib/libthr/thread/thr_list.c
@@ -1,394 +1,394 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu
 * Copyright (C) 2003 Daniel M. Eischen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "libc_private.h"
#include "thr_private.h"
#include "static_tls.h"

/*#define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG         stdout_debug
#else
#define DBG_MSG(x...)
#endif

#define MAX_THREADS             100000

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define MAX_CACHED_THREADS      100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, pthread)    free_threadq;
static struct umutex            free_thread_lock = DEFAULT_UMUTEX;
static struct umutex            tcb_lock = DEFAULT_UMUTEX;
static int                      free_thread_count = 0;
static int                      inited = 0;
static int                      total_threads;

LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES     128
static struct thread_hash_head  thr_hashtable[HASH_QUEUES];
#define THREAD_HASH(thrd)       (((unsigned long)thrd >> 8) % HASH_QUEUES)

static void thr_destroy(struct pthread *curthread, struct pthread *thread);

void
_thr_list_init(void)
{
        int i;

        _gc_count = 0;
        total_threads = 1;
        _thr_urwlock_init(&_thr_list_lock);
        TAILQ_INIT(&_thread_list);
        TAILQ_INIT(&free_threadq);
        _thr_umutex_init(&free_thread_lock);
        _thr_umutex_init(&tcb_lock);
        if (inited) {
                for (i = 0; i < HASH_QUEUES; ++i)
                        LIST_INIT(&thr_hashtable[i]);
        }
        inited = 1;
}
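The THREAD_HASH macro above keys directly on the thread pointer: the low eight
bits are dropped, presumably because struct pthread allocations sit well more
than 256 bytes apart and those bits carry little entropy, and the remainder is
reduced modulo the 128 buckets. A standalone sketch of the bucket computation;
the sample addresses are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define HASH_QUEUES 128
    #define THREAD_HASH(thrd) \
            (((unsigned long)(uintptr_t)(thrd) >> 8) % HASH_QUEUES)

    int
    main(void)
    {
            /* Hypothetical heap addresses standing in for struct pthread *. */
            void *samples[] = {
                    (void *)0x800246000UL,
                    (void *)0x800246a00UL,
                    (void *)0x800247400UL,
            };

            for (int i = 0; i < 3; i++)
                    printf("%p -> bucket %lu\n", samples[i],
                        (unsigned long)THREAD_HASH(samples[i]));
            return (0);
    }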
void
_thr_gc(struct pthread *curthread)
{
        struct pthread *td, *td_next;
        TAILQ_HEAD(, pthread) worklist;

        TAILQ_INIT(&worklist);
        THREAD_LIST_WRLOCK(curthread);

        /* Check the threads waiting for GC. */
        TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
                if (td->tid != TID_TERMINATED) {
                        /* make sure we are not still in userland */
                        continue;
                }
                _thr_stack_free(&td->attr);
                THR_GCLIST_REMOVE(td);
                TAILQ_INSERT_HEAD(&worklist, td, gcle);
        }
        THREAD_LIST_UNLOCK(curthread);

        while ((td = TAILQ_FIRST(&worklist)) != NULL) {
                TAILQ_REMOVE(&worklist, td, gcle);
                /*
                 * XXX we don't free initial thread, because there might
                 * have some code referencing initial thread.
                 */
                if (td == _thr_initial) {
                        DBG_MSG("Initial thread won't be freed\n");
                        continue;
                }
                _thr_free(curthread, td);
        }
}

struct pthread *
_thr_alloc(struct pthread *curthread)
{
        struct pthread *thread = NULL;
        struct tcb *tcb;

        if (curthread != NULL) {
                if (GC_NEEDED())
                        _thr_gc(curthread);
                if (free_thread_count > 0) {
                        THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                        if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
                                TAILQ_REMOVE(&free_threadq, thread, tle);
                                free_thread_count--;
                        }
                        THR_LOCK_RELEASE(curthread, &free_thread_lock);
                }
        }
        if (thread == NULL) {
                if (total_threads > MAX_THREADS)
                        return (NULL);
                atomic_add_int(&total_threads, 1);
-               thread = calloc(1, sizeof(struct pthread));
+               thread = __thr_calloc(1, sizeof(struct pthread));
                if (thread == NULL) {
                        atomic_add_int(&total_threads, -1);
                        return (NULL);
                }
                if ((thread->sleepqueue = _sleepq_alloc()) == NULL ||
                    (thread->wake_addr = _thr_alloc_wake_addr()) == NULL) {
                        thr_destroy(curthread, thread);
                        atomic_add_int(&total_threads, -1);
                        return (NULL);
                }
        } else {
                bzero(&thread->_pthread_startzero,
                    __rangeof(struct pthread, _pthread_startzero,
                    _pthread_endzero));
        }
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                tcb = _tcb_ctor(thread, 0 /* not initial tls */);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                tcb = _tcb_ctor(thread, 1 /* initial tls */);
        }
        if (tcb != NULL) {
                thread->tcb = tcb;
        } else {
                thr_destroy(curthread, thread);
                atomic_add_int(&total_threads, -1);
                thread = NULL;
        }
        return (thread);
}

void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
        DBG_MSG("Freeing thread %p\n", thread);

        /*
         * Always free tcb, as we only know it is part of RTLD TLS
         * block, but don't know its detail and can not assume how
         * it works, so better to avoid caching it here.
         */
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                _tcb_dtor(thread->tcb);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                _tcb_dtor(thread->tcb);
        }
        thread->tcb = NULL;
        if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
                thr_destroy(curthread, thread);
                atomic_add_int(&total_threads, -1);
        } else {
                /*
                 * Add the thread to the free thread list, this also avoids
                 * pthread id is reused too quickly, may help some buggy apps.
                 */
                THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
                free_thread_count++;
                THR_LOCK_RELEASE(curthread, &free_thread_lock);
        }
}

static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
        if (thread->sleepqueue != NULL)
                _sleepq_free(thread->sleepqueue);
        if (thread->wake_addr != NULL)
                _thr_release_wake_addr(thread->wake_addr);
-       free(thread);
+       __thr_free(thread);
}

/*
 * Add the thread to the list of all threads and increment
 * number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_ADD(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, 1);
}
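_thr_alloc() and _thr_free() above implement a bounded object cache:
_thr_free() parks up to MAX_CACHED_THREADS structures on free_threadq instead
of releasing them, and _thr_alloc() reuses those before touching the
allocator; tail insertion also delays pthread id reuse, as the comment notes.
A minimal single-threaded sketch of the same scheme, with an invented obj
type and the locking and MAX_THREADS accounting left out:

    #include <stdlib.h>
    #include <sys/queue.h>

    #define MAX_CACHED 100

    struct obj {
            TAILQ_ENTRY(obj) link;
            /* ... payload ... */
    };

    static TAILQ_HEAD(, obj) freeq = TAILQ_HEAD_INITIALIZER(freeq);
    static int ncached;

    static struct obj *
    obj_alloc(void)
    {
            struct obj *o;

            if ((o = TAILQ_FIRST(&freeq)) != NULL) {        /* fast path */
                    TAILQ_REMOVE(&freeq, o, link);
                    ncached--;
                    return (o);
            }
            return (calloc(1, sizeof(*o)));                 /* slow path */
    }

    static void
    obj_free(struct obj *o)
    {
            if (ncached >= MAX_CACHED) {    /* cache full: really free */
                    free(o);
                    return;
            }
            TAILQ_INSERT_TAIL(&freeq, o, link);     /* FIFO delays reuse */
            ncached++;
    }

    int
    main(void)
    {
            struct obj *a = obj_alloc();

            obj_free(a);                            /* cached, not freed */
            return (obj_alloc() == a ? 0 : 1);      /* reused from cache */
    }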
/*
 * Remove an active thread.
 */
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_REMOVE(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, -1);
}

void
_thr_hash_add(struct pthread *thread)
{
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_INSERT_HEAD(head, thread, hle);
}

void
_thr_hash_remove(struct pthread *thread)
{
        LIST_REMOVE(thread, hle);
}

struct pthread *
_thr_hash_find(struct pthread *thread)
{
        struct pthread *td;
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_FOREACH(td, head, hle) {
                if (td == thread)
                        return (thread);
        }
        return (NULL);
}

/*
 * Find a thread in the linked list of active threads and add a reference
 * to it.  Threads with positive reference counts will not be deallocated
 * until all references are released.
 */
int
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        int ret;

        if (thread == NULL)
                /* Invalid thread: */
                return (EINVAL);

        if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
                thread->refcount++;
                THR_CRITICAL_ENTER(curthread);
                THR_THREAD_UNLOCK(curthread, thread);
        }

        /* Return zero if the thread exists: */
        return (ret);
}

void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
        THR_THREAD_LOCK(curthread, thread);
        thread->refcount--;
        _thr_try_gc(curthread, thread);
        THR_CRITICAL_LEAVE(curthread);
}

/* entered with thread lock held, exit with thread lock released */
void
_thr_try_gc(struct pthread *curthread, struct pthread *thread)
{
        if (THR_SHOULD_GC(thread)) {
                THR_REF_ADD(curthread, thread);
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_WRLOCK(curthread);
                THR_THREAD_LOCK(curthread, thread);
                THR_REF_DEL(curthread, thread);
                if (THR_SHOULD_GC(thread)) {
                        THR_LIST_REMOVE(thread);
                        THR_GCLIST_ADD(thread);
                }
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_UNLOCK(curthread);
        } else {
                THR_THREAD_UNLOCK(curthread, thread);
        }
}

/* return with thread lock held if thread is found */
int
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        struct pthread *pthread;
        int ret;

        if (thread == NULL)
                return (EINVAL);

        ret = 0;
        THREAD_LIST_RDLOCK(curthread);
        pthread = _thr_hash_find(thread);
        if (pthread) {
                THR_THREAD_LOCK(curthread, pthread);
                if (include_dead == 0 && pthread->state == PS_DEAD) {
                        THR_THREAD_UNLOCK(curthread, pthread);
                        ret = ESRCH;
                }
        } else {
                ret = ESRCH;
        }
        THREAD_LIST_UNLOCK(curthread);
        return (ret);
}

static void
thr_distribute_static_tls(uintptr_t tlsbase, void *src, size_t len,
    size_t total_len)
{
        memcpy((void *)tlsbase, src, len);
        memset((char *)tlsbase + len, 0, total_len - len);
}

void
__pthread_distribute_static_tls(size_t offset, void *src, size_t len,
    size_t total_len)
{
        struct pthread *curthread, *thrd;
        uintptr_t tlsbase;

        if (!_thr_is_inited()) {
                tlsbase = _libc_get_static_tls_base(offset);
                thr_distribute_static_tls(tlsbase, src, len, total_len);
                return;
        }
        curthread = _get_curthread();
        THREAD_LIST_RDLOCK(curthread);
        TAILQ_FOREACH(thrd, &_thread_list, tle) {
                tlsbase = _get_static_tls_base(thrd, offset);
                thr_distribute_static_tls(tlsbase, src, len, total_len);
        }
        THREAD_LIST_UNLOCK(curthread);
}
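That closes out thr_list.c. Its final two functions let a dlopen()ed object
with static TLS patch every existing thread's TLS block: for each thread, the
module's initialization image is copied in and the trailing (.tbss-like)
remainder is zeroed. A toy, self-contained illustration of that copy-then-zero
step; the sizes and image contents are made up:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char image[3] = { 1, 2, 3 };      /* module's init image */
            char tls[8];                            /* one thread's block */

            memset(tls, 0xff, sizeof(tls));         /* pretend stale bytes */
            memcpy(tls, image, sizeof(image));      /* copy the image */
            memset(tls + sizeof(image), 0,
                sizeof(tls) - sizeof(image));       /* zero the remainder */
            for (size_t i = 0; i < sizeof(tls); i++)
                    printf("%d ", tls[i]);
            printf("\n");                   /* prints: 1 2 3 0 0 0 0 0 */
            return (0);
    }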
diff --git a/lib/libthr/thread/thr_sleepq.c b/lib/libthr/thread/thr_sleepq.c
index d7de9ab4e25a..9c680acd0ac0 100644
--- a/lib/libthr/thread/thr_sleepq.c
+++ b/lib/libthr/thread/thr_sleepq.c
@@ -1,183 +1,183 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 David Xu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>

#include "thr_private.h"

#define HASHSHIFT       9
#define HASHSIZE        (1 << HASHSHIFT)
#define SC_HASH(wchan) ((unsigned)                              \
        ((((uintptr_t)(wchan) >> 3)                             \
        ^ ((uintptr_t)(wchan) >> (HASHSHIFT + 3)))              \
        & (HASHSIZE - 1)))
#define SC_LOOKUP(wc)   &sc_table[SC_HASH(wc)]

struct sleepqueue_chain {
        struct umutex           sc_lock;
        int                     sc_enqcnt;
        LIST_HEAD(, sleepqueue) sc_queues;
        int                     sc_type;
};

static struct sleepqueue_chain  sc_table[HASHSIZE];

void
_sleepq_init(void)
{
        int i;

        for (i = 0; i < HASHSIZE; ++i) {
                LIST_INIT(&sc_table[i].sc_queues);
                _thr_umutex_init(&sc_table[i].sc_lock);
        }
}

struct sleepqueue *
_sleepq_alloc(void)
{
        struct sleepqueue *sq;

-       sq = calloc(1, sizeof(struct sleepqueue));
+       sq = __thr_calloc(1, sizeof(struct sleepqueue));
        TAILQ_INIT(&sq->sq_blocked);
        SLIST_INIT(&sq->sq_freeq);
        return (sq);
}

void
_sleepq_free(struct sleepqueue *sq)
{
-       free(sq);
+       __thr_free(sq);
}

void
_sleepq_lock(void *wchan)
{
        struct pthread *curthread = _get_curthread();
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        THR_LOCK_ACQUIRE_SPIN(curthread, &sc->sc_lock);
}

void
_sleepq_unlock(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct pthread *curthread = _get_curthread();

        sc = SC_LOOKUP(wchan);
        THR_LOCK_RELEASE(curthread, &sc->sc_lock);
}

static inline struct sleepqueue *
lookup(struct sleepqueue_chain *sc, void *wchan)
{
        struct sleepqueue *sq;

        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}

struct sleepqueue *
_sleepq_lookup(void *wchan)
{
        return (lookup(SC_LOOKUP(wchan), wchan));
}

void
_sleepq_add(void *wchan, struct pthread *td)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        sc = SC_LOOKUP(wchan);
        sq = lookup(sc, wchan);
        if (sq != NULL) {
                SLIST_INSERT_HEAD(&sq->sq_freeq, td->sleepqueue, sq_flink);
        } else {
                sq = td->sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
                /* sq->sq_type = type; */
        }
        td->sleepqueue = NULL;
        td->wchan = wchan;
        if (((++sc->sc_enqcnt << _thr_queuefifo) & 0xff) != 0)
                TAILQ_INSERT_HEAD(&sq->sq_blocked, td, wle);
        else
                TAILQ_INSERT_TAIL(&sq->sq_blocked, td, wle);
}
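The tail of _sleepq_add() gives the wait queue a mostly-LIFO policy with a
periodic FIFO correction: waiters normally go to the head, but whenever the
shifted enqueue counter lands on 0 mod 256 the thread is appended at the tail,
so long sleepers are not starved indefinitely. Assuming _thr_queuefifo is the
tunable libthr imports from the environment at startup, this standalone loop
shows how often the tail case fires for one plausible value:

    #include <stdio.h>

    int
    main(void)
    {
            int queuefifo = 4;      /* hypothetical tuning value */
            int enqcnt = 0;

            /*
             * Same test as _sleepq_add(): head unless the shifted counter
             * is 0 mod 256.  With queuefifo == 4, every 16th enqueue
             * (16, 32, ...) goes to the tail.
             */
            for (int i = 1; i <= 32; i++)
                    printf("enqueue %2d -> %s\n", i,
                        ((++enqcnt << queuefifo) & 0xff) != 0 ?
                        "head" : "tail");
            return (0);
    }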
int
_sleepq_remove(struct sleepqueue *sq, struct pthread *td)
{
        int rc;

        TAILQ_REMOVE(&sq->sq_blocked, td, wle);
        if (TAILQ_EMPTY(&sq->sq_blocked)) {
                LIST_REMOVE(sq, sq_hash);
                td->sleepqueue = sq;
                rc = 0;
        } else {
                td->sleepqueue = SLIST_FIRST(&sq->sq_freeq);
                SLIST_REMOVE_HEAD(&sq->sq_freeq, sq_flink);
                rc = 1;
        }
        td->wchan = NULL;
        return (rc);
}

void
_sleepq_drop(struct sleepqueue *sq,
        void (*cb)(struct pthread *, void *arg), void *arg)
{
        struct pthread *td;
        struct sleepqueue *sq2;

        td = TAILQ_FIRST(&sq->sq_blocked);
        if (td == NULL)
                return;
        LIST_REMOVE(sq, sq_hash);
        TAILQ_REMOVE(&sq->sq_blocked, td, wle);
        if (cb != NULL)
                cb(td, arg);
        td->sleepqueue = sq;
        td->wchan = NULL;
        sq2 = SLIST_FIRST(&sq->sq_freeq);
        TAILQ_FOREACH(td, &sq->sq_blocked, wle) {
                if (cb != NULL)
                        cb(td, arg);
                td->sleepqueue = sq2;
                td->wchan = NULL;
                sq2 = SLIST_NEXT(sq2, sq_flink);
        }
        TAILQ_INIT(&sq->sq_blocked);
        SLIST_INIT(&sq->sq_freeq);
}
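The four calloc()/free() to __thr_calloc()/__thr_free() substitutions are the
substance of this diff: with them, thr_list.c and thr_sleepq.c no longer
allocate through the public malloc, which an application or sanitizer may
have interposed. Presumably __thr_calloc() and __thr_free() route to libthr's
private allocator. The interposer below is an invented, deliberately
incomplete toy (it never frees, and ignores alignment beyond what mmap gives)
that only shows why calling the public malloc from inside libthr is risky:
the interposer re-enters libthr itself.

    #include <sys/mman.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t interp_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Toy malloc interposer: every allocation takes a pthread mutex.
     * If libthr allocated through this malloc while setting up a new
     * thread, the lock/unlock below would call back into libthr in
     * the middle of that operation.  A private allocator inside
     * libthr sidesteps this class of recursion entirely.
     */
    void *
    malloc(size_t len)
    {
            void *p;

            pthread_mutex_lock(&interp_lock);
            p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_PRIVATE, -1, 0);
            pthread_mutex_unlock(&interp_lock);
            return (p == MAP_FAILED ? NULL : p);
    }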