diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -307,17 +307,8 @@
 	p = td->td_proc;
 	if ((p->p_flag & P_HADTHREADS) != 0) {
 		PROC_LOCK(p);
-		while (p->p_singlethr > 0) {
-			error = msleep(&p->p_singlethr, &p->p_mtx,
-			    PWAIT | PCATCH, "exec1t", 0);
-			if (error != 0) {
-				error = ERESTART;
-				goto unlock;
-			}
-		}
 		if (thread_single(p, SINGLE_BOUNDARY) != 0)
 			error = ERESTART;
-unlock:
 		PROC_UNLOCK(p);
 	}
 	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -218,8 +218,6 @@
 {
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	p->p_flag2 |= P2_WEXIT;
-	while (p->p_singlethr > 0)
-		msleep(&p->p_singlethr, &p->p_mtx, PWAIT | PCATCH, "exit1t", 0);
 }

 /*
@@ -276,16 +274,15 @@
 	 * Kill off the other threads. This requires
 	 * some co-operation from other parts of the kernel
 	 * so it may not be instantaneous. With this state set
-	 * any thread entering the kernel from userspace will
-	 * thread_exit() in trap(). Any thread attempting to
+	 * any thread attempting to interruptibly
 	 * sleep will return immediately with EINTR or EWOULDBLOCK
 	 * which will hopefully force them to back out to userland
 	 * freeing resources as they go. Any thread attempting
-	 * to return to userland will thread_exit() from userret().
+	 * to return to userland will thread_exit() from ast().
 	 * thread_exit() will unsuspend us when the last of the
 	 * other threads exits.
 	 * If there is already a thread singler after resumption,
-	 * calling thread_single will fail; in that case, we just
+	 * calling thread_single() will fail; in that case, we just
 	 * re-check all suspension request, the thread should
 	 * either be suspended there or exit.
 	 */
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -307,8 +307,8 @@
 static int
 fork_norfproc(struct thread *td, int flags)
 {
-	int error;
 	struct proc *p1;
+	int error;

 	KASSERT((flags & RFPROC) == 0,
 	    ("fork_norfproc called with RFPROC set"));
@@ -319,19 +319,9 @@
 	 * must ensure that other threads do not concurrently create a second
 	 * process sharing the vmspace, see vmspace_unshare().
 	 */
-again:
 	if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS &&
 	    ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) {
 		PROC_LOCK(p1);
-		while (p1->p_singlethr > 0) {
-			error = msleep(&p1->p_singlethr, &p1->p_mtx,
-			    PWAIT | PCATCH, "rfork1t", 0);
-			if (error != 0) {
-				PROC_UNLOCK(p1);
-				return (ERESTART);
-			}
-			goto again;
-		}
 		if (thread_single(p1, SINGLE_BOUNDARY)) {
 			PROC_UNLOCK(p1);
 			return (ERESTART);
@@ -340,15 +330,16 @@
 	}

 	error = vm_forkproc(td, NULL, NULL, NULL, flags);
-	if (error)
+	if (error != 0)
 		goto fail;

 	/*
 	 * Close all file descriptors.
 	 */
-	if (flags & RFCFDG) {
+	if ((flags & RFCFDG) != 0) {
 		struct filedesc *fdtmp;
 		struct pwddesc *pdtmp;
+
 		pdtmp = pdinit(td->td_proc->p_pd, false);
 		fdtmp = fdinit();
 		pdescfree(td);
@@ -360,7 +351,7 @@

 	/*
 	 * Unshare file descriptors (from parent).
 	 */
-	if (flags & RFFDG) {
+	if ((flags & RFFDG) != 0) {
 		fdunshare(td);
 		pdunshare(td);
 	}
diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c
--- a/sys/kern/kern_procctl.c
+++ b/sys/kern/kern_procctl.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include <sys/taskqueue.h>
 #include
 #include
@@ -243,32 +244,29 @@
 	return (error);
 }

-static void
-reap_kill_proc_relock(struct proc *p, int xlocked)
-{
-	PROC_UNLOCK(p);
-	if (xlocked)
-		sx_xlock(&proctree_lock);
-	else
-		sx_slock(&proctree_lock);
-	PROC_LOCK(p);
-}
+struct reap_kill_proc_work {
+	struct ucred *cr;
+	struct proc *p2;
+	ksiginfo_t *ksi;
+	struct procctl_reaper_kill *rk;
+	int *error;
+	struct task t;
+};

 static void
-reap_kill_proc_locked(struct thread *td, struct proc *p2,
-    ksiginfo_t *ksi, struct procctl_reaper_kill *rk, int *error)
+reap_kill_proc_locked(struct reap_kill_proc_work *w)
 {
-	int error1, r, xlocked;
+	int error1;
 	bool need_stop;

-	PROC_LOCK_ASSERT(p2, MA_OWNED);
-	PROC_ASSERT_HELD(p2);
+	PROC_LOCK_ASSERT(w->p2, MA_OWNED);
+	PROC_ASSERT_HELD(w->p2);

-	error1 = p_cansignal(td, p2, rk->rk_sig);
+	error1 = cr_cansignal(w->cr, w->p2, w->rk->rk_sig);
 	if (error1 != 0) {
-		if (*error == ESRCH) {
-			rk->rk_fpid = p2->p_pid;
-			*error = error1;
+		if (*w->error == ESRCH) {
+			w->rk->rk_fpid = w->p2->p_pid;
+			*w->error = error1;
 		}
 		return;
 	}
@@ -281,48 +279,39 @@
 	 * into all structures during fork, ignored by iterator, and
 	 * then escaping signalling.
 	 *
-	 * If need_stop is true, then reap_kill_proc() returns true if
-	 * the process was successfully stopped and signalled, and
-	 * false if stopping failed and the signal was not sent.
-	 *
 	 * The thread cannot usefully stop itself anyway, and if other
 	 * thread of the current process forks while the current
 	 * thread signals the whole subtree, it is an application
 	 * race.
 	 */
-	need_stop = p2 != td->td_proc &&
-	    (td->td_proc->p_flag2 & P2_WEXIT) == 0 &&
-	    (p2->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0 &&
-	    (rk->rk_flags & REAPER_KILL_CHILDREN) == 0;
-
-	if (need_stop) {
-		xlocked = sx_xlocked(&proctree_lock);
-		sx_unlock(&proctree_lock);
-		r = thread_single(p2, SINGLE_ALLPROC);
-		reap_kill_proc_relock(p2, xlocked);
-		if (r != 0)
-			need_stop = false;
-	}
+	if ((w->p2->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
+		need_stop = thread_single(w->p2, SINGLE_ALLPROC) == 0;
+	else
+		need_stop = false;

-	pksignal(p2, rk->rk_sig, ksi);
-	rk->rk_killed++;
-	*error = error1;
+	pksignal(w->p2, w->rk->rk_sig, w->ksi);
+	w->rk->rk_killed++;
+	*w->error = error1;
 	if (need_stop)
-		thread_single_end(p2, SINGLE_ALLPROC);
+		thread_single_end(w->p2, SINGLE_ALLPROC);
 }

 static void
-reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
-    struct procctl_reaper_kill *rk, int *error)
+reap_kill_proc_work(void *arg, int pending __unused)
 {
-	PROC_LOCK(p2);
-	if ((p2->p_flag2 & P2_WEXIT) == 0) {
-		_PHOLD_LITE(p2);
-		reap_kill_proc_locked(td, p2, ksi, rk, error);
-		_PRELE(p2);
-	}
-	PROC_UNLOCK(p2);
+	struct reap_kill_proc_work *w;
+
+	w = arg;
+	PROC_LOCK(w->p2);
+	if ((w->p2->p_flag2 & P2_WEXIT) == 0)
+		reap_kill_proc_locked(w);
+	PROC_UNLOCK(w->p2);
+
+	sx_xlock(&proctree_lock);
+	w->p2 = NULL;
+	wakeup(&w->p2);
+	sx_xunlock(&proctree_lock);
 }

 struct reap_kill_tracker {
@@ -361,25 +350,40 @@
     struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
 {
 	struct proc *p2;
+	int error1;

 	LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
-		(void)reap_kill_proc(td, p2, ksi, rk, error);
-		/*
-		 * Do not end the loop on error, signal everything we
-		 * can.
-		 */
+		PROC_LOCK(p2);
+		if ((p2->p_flag2 & P2_WEXIT) == 0) {
+			error1 = p_cansignal(td, p2, rk->rk_sig);
+			if (error1 != 0) {
+				if (*error == ESRCH) {
+					rk->rk_fpid = p2->p_pid;
+					*error = error1;
+				}
+
+				/*
+				 * Do not end the loop on error,
+				 * signal everything we can.
+				 */
+			} else {
+				pksignal(p2, rk->rk_sig, ksi);
+				rk->rk_killed++;
+			}
+		}
+		PROC_UNLOCK(p2);
 	}
 }

 static bool
 reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
-    struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error,
-    struct unrhdr *pids)
+    struct unrhdr *pids, struct reap_kill_proc_work *w)
 {
 	struct reap_kill_tracker_head tracker;
 	struct reap_kill_tracker *t;
 	struct proc *p2;
-	bool res;
+	int r, xlocked;
+	bool res, st;

 	res = false;
 	TAILQ_INIT(&tracker);
@@ -401,14 +405,54 @@
 		LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
 			if (t->parent == reaper &&
-			    (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
-			    p2->p_reapsubtree != rk->rk_subtree)
+			    (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
+			    p2->p_reapsubtree != w->rk->rk_subtree)
 				continue;
 			if ((p2->p_treeflag & P_TREE_REAPER) != 0)
 				reap_kill_sched(&tracker, p2);
 			if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid)
 				continue;
-			reap_kill_proc(td, p2, ksi, rk, error);
+			if (p2 == td->td_proc) {
+				if ((p2->p_flag & P_HADTHREADS) != 0 &&
+				    (p2->p_flag2 & P2_WEXIT) == 0) {
+					xlocked = sx_xlocked(&proctree_lock);
+					sx_unlock(&proctree_lock);
+					st = true;
+				} else {
+					st = false;
+				}
+				PROC_LOCK(p2);
+				if (st)
+					r = thread_single(p2, SINGLE_NO_EXIT);
+				pksignal(p2, w->rk->rk_sig, w->ksi);
+				w->rk->rk_killed++;
+				if (st && r == 0)
+					thread_single_end(p2, SINGLE_NO_EXIT);
+				PROC_UNLOCK(p2);
+				if (st) {
+					if (xlocked)
+						sx_xlock(&proctree_lock);
+					else
+						sx_slock(&proctree_lock);
+				}
+			} else {
+				PROC_LOCK(p2);
+				if ((p2->p_flag2 & P2_WEXIT) == 0) {
+					_PHOLD_LITE(p2);
+					PROC_UNLOCK(p2);
+					w->p2 = p2;
+					w->t.ta_context = w;
+					taskqueue_enqueue(taskqueue_thread,
+					    &w->t);
+					while (w->p2 != NULL) {
+						sx_sleep(&w->p2, &proctree_lock,
+						    PWAIT, "reapst", 0);
+					}
+					PROC_LOCK(p2);
+					_PRELE(p2);
+				}
+				PROC_UNLOCK(p2);
+			}
 			res = true;
 		}
 		reap_kill_sched_free(t);
@@ -418,7 +462,7 @@

 static void
 reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
-    struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
+    struct reap_kill_proc_work *w)
 {
 	struct unrhdr pids;

@@ -433,15 +477,9 @@
 		PROC_UNLOCK(td->td_proc);
 		goto out;
 	}
-	td->td_proc->p_singlethr++;
 	PROC_UNLOCK(td->td_proc);
-	while (reap_kill_subtree_once(td, p, reaper, rk, ksi, error, &pids))
+	while (reap_kill_subtree_once(td, p, reaper, &pids, w))
 		;
-	PROC_LOCK(td->td_proc);
-	td->td_proc->p_singlethr--;
-	if (td->td_proc->p_singlethr == 0)
-		wakeup(&p->p_singlethr);
-	PROC_UNLOCK(td->td_proc);
out:
 	clean_unrhdr(&pids);
 	clear_unrhdr(&pids);
@@ -459,6 +497,7 @@
 static int
 reap_kill(struct thread *td, struct proc *p, void *data)
 {
+	struct reap_kill_proc_work *w;
 	struct proc *reaper;
 	ksiginfo_t ksi;
 	struct procctl_reaper_kill *rk;
@@ -487,7 +526,15 @@
 	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
 		reap_kill_children(td, reaper, rk, &ksi, &error);
 	} else {
-		reap_kill_subtree(td, p, reaper, rk, &ksi, &error);
+		w = malloc(sizeof(*w), M_TEMP, M_WAITOK);
+		w->cr = crhold(td->td_ucred);
+		w->ksi = &ksi;
+		w->rk = rk;
+		w->error = &error;
+		TASK_INIT(&w->t, 0, reap_kill_proc_work, NULL);
+		reap_kill_subtree(td, p, reaper, w);
+		crfree(w->cr);
+		free(w, M_TEMP);
 	}
 	PROC_LOCK(p);
 	return (error);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -99,7 +99,7 @@
     "struct proc KBI p_pid");
 _Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
     "struct proc KBI p_filemon");
-_Static_assert(offsetof(struct proc, p_comm) == 0x3e4,
+_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
     "struct proc KBI p_comm");
 _Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
     "struct proc KBI p_emuldata");
@@ -119,9 +119,9 @@
     "struct proc KBI p_pid");
 _Static_assert(offsetof(struct proc, p_filemon) == 0x270,
     "struct proc KBI p_filemon");
-_Static_assert(offsetof(struct proc, p_comm) == 0x288,
+_Static_assert(offsetof(struct proc, p_comm) == 0x284,
     "struct proc KBI p_comm");
-_Static_assert(offsetof(struct proc, p_emuldata) == 0x314,
+_Static_assert(offsetof(struct proc, p_emuldata) == 0x310,
     "struct proc KBI p_emuldata");
 #endif
@@ -1243,12 +1243,8 @@
 		else
 			p->p_flag &= ~P_SINGLE_BOUNDARY;
 	}
-	if (mode == SINGLE_ALLPROC) {
+	if (mode == SINGLE_ALLPROC)
 		p->p_flag |= P_TOTAL_STOP;
-		thread_lock(td);
-		td->td_flags |= TDF_DOING_SA;
-		thread_unlock(td);
-	}
 	p->p_flag |= P_STOPPED_SINGLE;
 	PROC_SLOCK(p);
 	p->p_singlethread = td;
@@ -1335,11 +1331,6 @@
 		}
 	}
 	PROC_SUNLOCK(p);
-	if (mode == SINGLE_ALLPROC) {
-		thread_lock(td);
-		td->td_flags &= ~TDF_DOING_SA;
-		thread_unlock(td);
-	}
 	return (0);
 }
@@ -1626,11 +1617,10 @@
 	if (!P_SHOULDSTOP(p)) {
 		FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
-			if (TD_IS_SUSPENDED(td) && (td->td_flags &
-			    TDF_DOING_SA) == 0) {
+			if (TD_IS_SUSPENDED(td))
 				wakeup_swapper |= thread_unsuspend_one(td, p,
 				    true);
-			} else
+			else
 				thread_unlock(td);
 		}
 	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -388,8 +388,9 @@
 }

 /*
- * Sets a timeout that will remove the current thread from the specified
- * sleep queue after timo ticks if the thread has not already been awakened.
+ * Sets a timeout that will remove the current thread from the
+ * specified sleep queue at the specified time if the thread has not
+ * already been awakened. Flags are from C_* (callout) namespace.
  */
 void
 sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
@@ -482,7 +483,7 @@
 	 * Lock the per-process spinlock prior to dropping the
 	 * PROC_LOCK to avoid a signal delivery race.
 	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
-	 * currently held in tdsendsignal().
+	 * currently held in tdsendsignal() and thread_single().
 	 */
 	PROC_SLOCK(p);
 	mtx_lock_spin(&sc->sc_lock);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -470,15 +470,15 @@
 #define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
 #define	TDF_SEINTR	0x00200000 /* EINTR on stop attempts. */
 #define	TDF_SWAPINREQ	0x00400000 /* Swapin request due to wakeup. */
-#define	TDF_DOING_SA	0x00800000 /* Doing SINGLE_ALLPROC, do not unsuspend me */
+#define	TDF_UNUSED6	0x00800000 /* Available */
 #define	TDF_SCHED0	0x01000000 /* Reserved for scheduler private use */
 #define	TDF_SCHED1	0x02000000 /* Reserved for scheduler private use */
 #define	TDF_SCHED2	0x04000000 /* Reserved for scheduler private use */
 #define	TDF_SCHED3	0x08000000 /* Reserved for scheduler private use */
-#define	TDF_UNUSED6	0x10000000 /* Available */
-#define	TDF_UNUSED7	0x20000000 /* Available */
-#define	TDF_UNUSED8	0x40000000 /* Available */
-#define	TDF_UNUSED9	0x80000000 /* Available */
+#define	TDF_UNUSED7	0x10000000 /* Available */
+#define	TDF_UNUSED8	0x20000000 /* Available */
+#define	TDF_UNUSED9	0x40000000 /* Available */
+#define	TDF_UNUSED10	0x80000000 /* Available */

 enum {
 	TDA_AST = 0,		/* Special: call all non-flagged AST handlers */
@@ -720,8 +720,6 @@
 	int p_pendingexits;	/* (c) Count of pending thread exits. */
 	struct filemon *p_filemon; /* (c) filemon-specific data. */
 	int p_pdeathsig;	/* (c) Signal from parent on exit. */
-	int p_singlethr;	/* (c) Count of threads doing
-				   external thread_single() */
 /* End area that is zeroed on creation. */
 #define	p_endzero	p_magic
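
Note on the new pattern: reap_kill() now packages the request into a reap_kill_proc_work item, and reap_kill_subtree_once() hands each target process to the taskqueue_thread queue, sleeping on &w->p2 under proctree_lock until reap_kill_proc_work() clears the pointer and calls wakeup(). The single-threading of the target thus runs in the taskqueue thread's context rather than the caller's, which is what allows the p_singlethr handshakes in exec, exit, and fork to be deleted. Below is a minimal, self-contained sketch of the same enqueue-and-wait shape. It is illustrative only: all demo_* names are invented, and a private mutex stands in for proctree_lock.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

/* Work item: the task plus a completion flag the caller sleeps on. */
struct demo_work {
	struct task	t;
	int		done;	/* protected by demo_mtx */
};

static struct mtx demo_mtx;

/* Runs in the taskqueue_thread context, not in the enqueuing thread. */
static void
demo_fn(void *arg, int pending __unused)
{
	struct demo_work *w = arg;

	/* ... the actual work would go here ... */

	mtx_lock(&demo_mtx);
	w->done = 1;
	wakeup(&w->done);	/* wake the waiting caller */
	mtx_unlock(&demo_mtx);
}

static int
demo_modevent(module_t mod __unused, int what, void *arg __unused)
{
	struct demo_work w;

	switch (what) {
	case MOD_LOAD:
		mtx_init(&demo_mtx, "demomtx", NULL, MTX_DEF);
		w.done = 0;
		TASK_INIT(&w.t, 0, demo_fn, &w);
		taskqueue_enqueue(taskqueue_thread, &w.t);
		/*
		 * Sleep until the worker signals completion; the
		 * stack-allocated work item stays valid because we do
		 * not return until w.done is set.
		 */
		mtx_lock(&demo_mtx);
		while (w.done == 0)
			mtx_sleep(&w.done, &demo_mtx, PWAIT, "demowt", 0);
		mtx_unlock(&demo_mtx);
		return (0);
	case MOD_UNLOAD:
		mtx_destroy(&demo_mtx);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t demo_mod = {
	"taskq_demo",
	demo_modevent,
	NULL
};
DECLARE_MODULE(taskq_demo, demo_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);

As in the patch, the enqueuing thread owns the work item's lifetime because it does not return until the worker has signalled completion; reap_kill() relies on the same property when it frees w after reap_kill_subtree() returns.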