sys/kern/kern_thread.c
(535 earlier lines not shown)
	if (tidhashlock > 0)
		tidhashlock--;
	tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
	    M_TIDHASH, M_WAITOK | M_ZERO);
	for (i = 0; i < tidhashlock + 1; i++)
		rw_init(&tidhashtbl_lock[i], "tidhash");
	TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
	callout_init(&thread_reap_callout, 1);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Place an unused thread on the zombie list.
 */
void
thread_zombie(struct thread *td)
{
(146 lines not shown)
		if (tdd->tdd_zombies != NULL &&
		    (u_int)(cticks - lticks) > 5 * hz) {
			wantreap = true;
			break;
		}
	}
	if (wantreap)
		taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
	callout_reset(&thread_reap_callout, 5 * hz,
	    thread_reap_callout_cb, NULL);
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;
	lwpid_t tid;

	if (!thread_count_inc()) {
		return (NULL);
	}
	tid = tid_alloc();
	td = uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		tid_free(tid);
		thread_count_dec();
		return (NULL);
	}
	td->td_tid = tid;
	cpu_thread_alloc(td);
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{
	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
markj (inline comment): Is it really necessary to handle the is_bound case? This function also sleeps; most or maybe all users of sched_pin() should not sleep in pinned sections.

kib (inline comment): Dropped and switched to reuse most of the code from quiesce_cpus().
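For context on the quiesce approach mentioned above, here is a minimal, hypothetical sketch of the pattern quiesce_cpus(9) is built around: bind the calling thread to every CPU in turn, so that each CPU is forced through a context switch. The function name below is invented and this is not the code from this revision; the real quiesce_cpus() additionally tracks per-CPU switch counts and may sleep, which is why it cannot run inside a pinned section.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

/* Illustrative only: force every CPU through a context switch. */
static void
example_quiesce_all_cpus(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	CPU_FOREACH(cpu) {
		/* Binding migrates the caller, switching "cpu" out. */
		thread_lock(td);
		sched_bind(td, cpu);
		thread_unlock(td);
	}
	thread_lock(td);
	sched_unbind(td);
	thread_unlock(td);
}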
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}
hselasky (inline comment): Why are you not using taskqueue_drain() here?

kib (inline comment): I answered this in my previous reply to the similar question from you.

/*
 * Deallocate a thread.
 */
static void
thread_free_batched(struct thread *td)
{
	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
hselasky (inline comment): The task, "t", can be made static. taskqueue_enqueue() will handle any races? How about draining taskqueue_thread instead of the tsleep()?

hselasky (inline comment): See "taskqueue_drain(queue, &task)".

kib (inline comment): I would be fine with the use of drain if the taskqueue thread used for reaping were a dedicated thread. Since this is the system-global thread, I prefer to have a per-request task and wait for its execution. Changing the reaper to its own thread might indeed be a good change.

markj (inline comment): Is it dynamically allocated only because the thread stack may be swapped out while waiting?

kib (inline comment): Exactly, and I do not want to PHOLD() the current process.
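To make the trade-off discussed above concrete, here is a minimal, hypothetical sketch of the per-request pattern kib describes: a heap-allocated task is enqueued on the system-global taskqueue_thread and the caller sleeps until the callback signals completion. All names below are invented for illustration; this is not the code under review. Allocating the request from the heap means the sleeping caller's kernel stack may be swapped out without the task or the wait channel going away, so the process does not need to be held with PHOLD().

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>

/* Hypothetical per-request state; names are illustrative only. */
struct reap_wait {
	struct mtx	rw_lock;
	bool		rw_done;
	struct task	rw_task;
};

static void
example_reap_cb(void *arg, int pending __unused)
{
	struct reap_wait *rw;

	rw = arg;
	/* ... perform the actual reaping work here ... */
	mtx_lock(&rw->rw_lock);
	rw->rw_done = true;
	wakeup(rw);
	mtx_unlock(&rw->rw_lock);
}

static void
example_reap_barrier(void)
{
	struct reap_wait *rw;

	/*
	 * Heap-allocated so the waiter's kernel stack can be swapped
	 * out while it sleeps, without pinning the process.
	 */
	rw = malloc(sizeof(*rw), M_TEMP, M_WAITOK | M_ZERO);
	mtx_init(&rw->rw_lock, "reapwait", NULL, MTX_DEF);
	TASK_INIT(&rw->rw_task, 0, example_reap_cb, rw);
	taskqueue_enqueue(taskqueue_thread, &rw->rw_task);

	/* Wait until the callback has run on the taskqueue thread. */
	mtx_lock(&rw->rw_lock);
	while (!rw->rw_done)
		msleep(rw, &rw->rw_lock, PWAIT, "reapwait", 0);
	mtx_unlock(&rw->rw_lock);

	mtx_destroy(&rw->rw_lock);
	free(rw, M_TEMP);
}

With a dedicated taskqueue thread, taskqueue_drain() on a static task would be the simpler alternative hselasky suggests; the per-request form above is what the discussion settles on for the shared system taskqueue.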
	callout_drain(&td->td_slpcallout);

	/*
	 * Freeing handled by the caller.
	 */

markj (inline comment): Why is the pause so long?

	td->td_tid = -1;
	uma_zfree(thread_zone, td);
}

void
thread_free(struct thread *td)
{
	lwpid_t tid;
(958 remaining lines not shown)