D36207.id109473.diff

diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -307,17 +307,8 @@
p = td->td_proc;
if ((p->p_flag & P_HADTHREADS) != 0) {
PROC_LOCK(p);
- while (p->p_singlethr > 0) {
- error = msleep(&p->p_singlethr, &p->p_mtx,
- PWAIT | PCATCH, "exec1t", 0);
- if (error != 0) {
- error = ERESTART;
- goto unlock;
- }
- }
if (thread_single(p, SINGLE_BOUNDARY) != 0)
error = ERESTART;
-unlock:
PROC_UNLOCK(p);
}
KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
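
A note on the hunk above: the removed msleep() loop made execve() wait
while a sibling thread was busy single-threading another process (the
old p_singlethr protocol). With that work moved to a taskqueue thread
elsewhere in this patch, the wait is unnecessary and exec simply
retries on contention. A condensed sketch of the post-patch logic, not
the literal kernel source:

	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * thread_single() fails if another thread already owns
		 * single-threading for this process; ERESTART makes the
		 * syscall layer re-issue the whole execve() after the
		 * competing single-threader finishes.
		 */
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
		PROC_UNLOCK(p);
	}
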
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -218,8 +218,6 @@
{
PROC_LOCK_ASSERT(p, MA_OWNED);
p->p_flag2 |= P2_WEXIT;
- while (p->p_singlethr > 0)
- msleep(&p->p_singlethr, &p->p_mtx, PWAIT | PCATCH, "exit1t", 0);
}
/*
@@ -276,16 +274,15 @@
* Kill off the other threads. This requires
* some co-operation from other parts of the kernel
* so it may not be instantaneous. With this state set
- * any thread entering the kernel from userspace will
- * thread_exit() in trap(). Any thread attempting to
+ * any thread attempting to interruptibly
* sleep will return immediately with EINTR or EWOULDBLOCK
* which will hopefully force them to back out to userland
* freeing resources as they go. Any thread attempting
- * to return to userland will thread_exit() from userret().
+ * to return to userland will thread_exit() from ast().
* thread_exit() will unsuspend us when the last of the
* other threads exits.
* If there is already a thread singler after resumption,
- * calling thread_single will fail; in that case, we just
+ * calling thread_single() will fail; in that case, we just
* re-check all suspension request, the thread should
* either be suspended there or exit.
*/
@@ -499,7 +496,7 @@
wakeup(q->p_reaper);
for (; q != NULL; q = nq) {
nq = LIST_NEXT(q, p_sibling);
- ksi = ksiginfo_alloc(TRUE);
+ ksi = ksiginfo_alloc(M_WAITOK);
PROC_LOCK(q);
q->p_sigparent = SIGCHLD;
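
Two changes land in kern_exit.c: the P2_WEXIT setter no longer sleeps
waiting for external single-threaders (the p_singlethr handshake is
gone), and ksiginfo_alloc() now takes a malloc(9)-style wait flag
rather than a boolean. A small sketch of the new allocator contract;
the wrapper function is hypothetical:

	static int
	queue_one_signal(void)
	{
		ksiginfo_t *ksi;

		/*
		 * M_WAITOK always succeeds once the UMA zone exists;
		 * M_NOWAIT may fail under memory pressure, and the
		 * caller must cope, as sigqueue_add() does.
		 */
		ksi = ksiginfo_alloc(M_NOWAIT);
		if (ksi == NULL)
			return (EAGAIN);
		/* ... fill in and deliver ksi ... */
		ksiginfo_free(ksi);
		return (0);
	}
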
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -307,8 +307,8 @@
static int
fork_norfproc(struct thread *td, int flags)
{
- int error;
struct proc *p1;
+ int error;
KASSERT((flags & RFPROC) == 0,
("fork_norfproc called with RFPROC set"));
@@ -319,19 +319,9 @@
* must ensure that other threads do not concurrently create a second
* process sharing the vmspace, see vmspace_unshare().
*/
-again:
if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS &&
((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) {
PROC_LOCK(p1);
- while (p1->p_singlethr > 0) {
- error = msleep(&p1->p_singlethr, &p1->p_mtx,
- PWAIT | PCATCH, "rfork1t", 0);
- if (error != 0) {
- PROC_UNLOCK(p1);
- return (ERESTART);
- }
- goto again;
- }
if (thread_single(p1, SINGLE_BOUNDARY)) {
PROC_UNLOCK(p1);
return (ERESTART);
@@ -340,15 +330,16 @@
}
error = vm_forkproc(td, NULL, NULL, NULL, flags);
- if (error)
+ if (error != 0)
goto fail;
/*
* Close all file descriptors.
*/
- if (flags & RFCFDG) {
+ if ((flags & RFCFDG) != 0) {
struct filedesc *fdtmp;
struct pwddesc *pdtmp;
+
pdtmp = pdinit(td->td_proc->p_pd, false);
fdtmp = fdinit();
pdescfree(td);
@@ -360,7 +351,7 @@
/*
* Unshare file descriptors (from parent).
*/
- if (flags & RFFDG) {
+ if ((flags & RFFDG) != 0) {
fdunshare(td);
pdunshare(td);
}
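
Besides dropping the rfork-side wait loop (the same p_singlethr
removal as in kern_exec.c), the kern_fork.c hunks are style(9)
cleanups: explicit "!= 0" flag tests and reordered locals. For
context, the RFCFDG branch installs brand-new descriptor tables; a
sketch of the full path, assuming the lines elided from the hunk swap
the tables in as stock fork_norfproc() does:

	if ((flags & RFCFDG) != 0) {
		struct filedesc *fdtmp;
		struct pwddesc *pdtmp;

		pdtmp = pdinit(td->td_proc->p_pd, false); /* fresh cwd/root */
		fdtmp = fdinit();			  /* empty fd table */
		pdescfree(td);		/* drop the old tables ... */
		fdescfree(td);
		td->td_proc->p_fd = fdtmp;	/* ... install the new */
		td->td_proc->p_pd = pdtmp;
	}
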
diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c
--- a/sys/kern/kern_procctl.c
+++ b/sys/kern/kern_procctl.c
@@ -43,6 +43,7 @@
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
+#include <sys/taskqueue.h>
#include <sys/wait.h>
#include <vm/vm.h>
@@ -243,32 +244,29 @@
return (error);
}
-static void
-reap_kill_proc_relock(struct proc *p, int xlocked)
-{
- PROC_UNLOCK(p);
- if (xlocked)
- sx_xlock(&proctree_lock);
- else
- sx_slock(&proctree_lock);
- PROC_LOCK(p);
-}
+struct reap_kill_proc_work {
+ struct ucred *cr;
+ struct proc *target;
+ ksiginfo_t *ksi;
+ struct procctl_reaper_kill *rk;
+ int *error;
+ struct task t;
+};
static void
-reap_kill_proc_locked(struct thread *td, struct proc *p2,
- ksiginfo_t *ksi, struct procctl_reaper_kill *rk, int *error)
+reap_kill_proc_locked(struct reap_kill_proc_work *w)
{
- int error1, r, xlocked;
+ int error1;
bool need_stop;
- PROC_LOCK_ASSERT(p2, MA_OWNED);
- PROC_ASSERT_HELD(p2);
+ PROC_LOCK_ASSERT(w->target, MA_OWNED);
+ PROC_ASSERT_HELD(w->target);
- error1 = p_cansignal(td, p2, rk->rk_sig);
+ error1 = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
if (error1 != 0) {
- if (*error == ESRCH) {
- rk->rk_fpid = p2->p_pid;
- *error = error1;
+ if (*w->error == ESRCH) {
+ w->rk->rk_fpid = w->target->p_pid;
+ *w->error = error1;
}
return;
}
@@ -281,48 +279,39 @@
* into all structures during fork, ignored by iterator, and
* then escaping signalling.
*
- * If need_stop is true, then reap_kill_proc() returns true if
- * the process was successfully stopped and signalled, and
- * false if stopping failed and the signal was not sent.
- *
* The thread cannot usefully stop itself anyway, and if other
* thread of the current process forks while the current
* thread signals the whole subtree, it is an application
* race.
*/
- need_stop = p2 != td->td_proc &&
- (td->td_proc->p_flag2 & P2_WEXIT) == 0 &&
- (p2->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0 &&
- (rk->rk_flags & REAPER_KILL_CHILDREN) == 0;
-
- if (need_stop) {
- xlocked = sx_xlocked(&proctree_lock);
- sx_unlock(&proctree_lock);
- r = thread_single(p2, SINGLE_ALLPROC);
- reap_kill_proc_relock(p2, xlocked);
- if (r != 0)
- need_stop = false;
- }
+ if ((w->target->p_flag & (P_KPROC | P_SYSTEM | P_STOPPED)) == 0)
+ need_stop = thread_single(w->target, SINGLE_ALLPROC) == 0;
+ else
+ need_stop = false;
- pksignal(p2, rk->rk_sig, ksi);
- rk->rk_killed++;
- *error = error1;
+ (void)pksignal(w->target, w->rk->rk_sig, w->ksi);
+ w->rk->rk_killed++;
+ *w->error = error1;
if (need_stop)
- thread_single_end(p2, SINGLE_ALLPROC);
+ thread_single_end(w->target, SINGLE_ALLPROC);
}
static void
-reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
- struct procctl_reaper_kill *rk, int *error)
+reap_kill_proc_work(void *arg, int pending __unused)
{
- PROC_LOCK(p2);
- if ((p2->p_flag2 & P2_WEXIT) == 0) {
- _PHOLD_LITE(p2);
- reap_kill_proc_locked(td, p2, ksi, rk, error);
- _PRELE(p2);
- }
- PROC_UNLOCK(p2);
+ struct reap_kill_proc_work *w;
+
+ w = arg;
+ PROC_LOCK(w->target);
+ if ((w->target->p_flag2 & P2_WEXIT) == 0)
+ reap_kill_proc_locked(w);
+ PROC_UNLOCK(w->target);
+
+ sx_xlock(&proctree_lock);
+ w->target = NULL;
+ wakeup(&w->target);
+ sx_xunlock(&proctree_lock);
}
struct reap_kill_tracker {
@@ -361,25 +350,40 @@
struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
{
struct proc *p2;
+ int error1;
LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
- (void)reap_kill_proc(td, p2, ksi, rk, error);
- /*
- * Do not end the loop on error, signal everything we
- * can.
- */
+ PROC_LOCK(p2);
+ if ((p2->p_flag2 & P2_WEXIT) == 0) {
+ error1 = p_cansignal(td, p2, rk->rk_sig);
+ if (error1 != 0) {
+ if (*error == ESRCH) {
+ rk->rk_fpid = p2->p_pid;
+ *error = error1;
+ }
+
+ /*
+ * Do not end the loop on error,
+ * signal everything we can.
+ */
+ } else {
+ (void)pksignal(p2, rk->rk_sig, ksi);
+ rk->rk_killed++;
+ }
+ }
+ PROC_UNLOCK(p2);
}
}
static bool
reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
- struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error,
- struct unrhdr *pids)
+ struct unrhdr *pids, struct reap_kill_proc_work *w)
{
struct reap_kill_tracker_head tracker;
struct reap_kill_tracker *t;
struct proc *p2;
- bool res;
+ int r, xlocked;
+ bool res, st;
res = false;
TAILQ_INIT(&tracker);
@@ -401,14 +405,55 @@
LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
if (t->parent == reaper &&
- (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
- p2->p_reapsubtree != rk->rk_subtree)
+ (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
+ p2->p_reapsubtree != w->rk->rk_subtree)
continue;
if ((p2->p_treeflag & P_TREE_REAPER) != 0)
reap_kill_sched(&tracker, p2);
if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid)
continue;
- reap_kill_proc(td, p2, ksi, rk, error);
+ if (p2 == td->td_proc) {
+ if ((p2->p_flag & P_HADTHREADS) != 0 &&
+ (p2->p_flag2 & P2_WEXIT) == 0) {
+ xlocked = sx_xlocked(&proctree_lock);
+ sx_unlock(&proctree_lock);
+ st = true;
+ } else {
+ st = false;
+ }
+ PROC_LOCK(p2);
+ if (st)
+ r = thread_single(p2, SINGLE_NO_EXIT);
+ (void)pksignal(p2, w->rk->rk_sig, w->ksi);
+ w->rk->rk_killed++;
+ if (st && r == 0)
+ thread_single_end(p2, SINGLE_NO_EXIT);
+ PROC_UNLOCK(p2);
+ if (st) {
+ if (xlocked)
+ sx_xlock(&proctree_lock);
+ else
+ sx_slock(&proctree_lock);
+ }
+ } else {
+ PROC_LOCK(p2);
+ if ((p2->p_flag2 & P2_WEXIT) == 0) {
+ _PHOLD_LITE(p2);
+ PROC_UNLOCK(p2);
+ w->target = p2;
+ w->t.ta_context = w;
+ taskqueue_enqueue(taskqueue_thread,
+ &w->t);
+ while (w->target != NULL) {
+ sx_sleep(&w->target,
+ &proctree_lock, PWAIT,
+ "reapst", 0);
+ }
+ PROC_LOCK(p2);
+ _PRELE(p2);
+ }
+ PROC_UNLOCK(p2);
+ }
res = true;
}
reap_kill_sched_free(t);
@@ -418,7 +463,7 @@
static void
reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
- struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
+ struct reap_kill_proc_work *w)
{
struct unrhdr pids;
@@ -433,15 +478,9 @@
PROC_UNLOCK(td->td_proc);
goto out;
}
- td->td_proc->p_singlethr++;
PROC_UNLOCK(td->td_proc);
- while (reap_kill_subtree_once(td, p, reaper, rk, ksi, error, &pids))
+ while (reap_kill_subtree_once(td, p, reaper, &pids, w))
;
- PROC_LOCK(td->td_proc);
- td->td_proc->p_singlethr--;
- if (td->td_proc->p_singlethr == 0)
- wakeup(&p->p_singlethr);
- PROC_UNLOCK(td->td_proc);
out:
clean_unrhdr(&pids);
clear_unrhdr(&pids);
@@ -459,8 +498,9 @@
static int
reap_kill(struct thread *td, struct proc *p, void *data)
{
+ struct reap_kill_proc_work *w;
struct proc *reaper;
- ksiginfo_t ksi;
+ ksiginfo_t *ksi;
struct procctl_reaper_kill *rk;
int error;
@@ -476,19 +516,29 @@
return (EINVAL);
PROC_UNLOCK(p);
reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
- ksiginfo_init(&ksi);
- ksi.ksi_signo = rk->rk_sig;
- ksi.ksi_code = SI_USER;
- ksi.ksi_pid = td->td_proc->p_pid;
- ksi.ksi_uid = td->td_ucred->cr_ruid;
+ ksi = ksiginfo_alloc(M_WAITOK);
+ ksiginfo_init(ksi);
+ ksi->ksi_signo = rk->rk_sig;
+ ksi->ksi_code = SI_USER;
+ ksi->ksi_pid = td->td_proc->p_pid;
+ ksi->ksi_uid = td->td_ucred->cr_ruid;
error = ESRCH;
rk->rk_killed = 0;
rk->rk_fpid = -1;
if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
- reap_kill_children(td, reaper, rk, &ksi, &error);
+ reap_kill_children(td, reaper, rk, ksi, &error);
} else {
- reap_kill_subtree(td, p, reaper, rk, &ksi, &error);
+ w = malloc(sizeof(*w), M_TEMP, M_WAITOK);
+ w->cr = crhold(td->td_ucred);
+ w->ksi = ksi;
+ w->rk = rk;
+ w->error = &error;
+ TASK_INIT(&w->t, 0, reap_kill_proc_work, NULL);
+ reap_kill_subtree(td, p, reaper, w);
+ crfree(w->cr);
+ free(w, M_TEMP);
}
+ ksiginfo_free(ksi);
PROC_LOCK(p);
return (error);
}
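
This is the heart of the patch: instead of having a reap_kill() caller
single-thread each target directly (previously guarded by the
p_singlethr handshake and the TDF_DOING_SA thread flag), the work is
packaged into struct reap_kill_proc_work and run from a taskqueue
thread. That worker is never a member of any target process, so
thread_single(SINGLE_ALLPROC) from it cannot deadlock against a
single-threading request aimed at the caller. The requester sleeps on
&w->target under proctree_lock until the worker signals completion.
A minimal sketch of the handoff; reap_kill_dispatch() is a
hypothetical factoring of the inline loop in reap_kill_subtree_once():

	/* Worker side, running on taskqueue_thread. */
	static void
	reap_kill_proc_work(void *arg, int pending __unused)
	{
		struct reap_kill_proc_work *w = arg;

		PROC_LOCK(w->target);
		if ((w->target->p_flag2 & P2_WEXIT) == 0)
			reap_kill_proc_locked(w); /* single-thread + signal */
		PROC_UNLOCK(w->target);

		sx_xlock(&proctree_lock);
		w->target = NULL;	/* completion flag ... */
		wakeup(&w->target);	/* ... doubles as wakeup channel */
		sx_xunlock(&proctree_lock);
	}

	/* Requester side; proctree_lock is held on entry and on return. */
	static void
	reap_kill_dispatch(struct reap_kill_proc_work *w, struct proc *p2)
	{
		w->target = p2;
		w->t.ta_context = w;
		taskqueue_enqueue(taskqueue_thread, &w->t);
		while (w->target != NULL)
			sx_sleep(&w->target, &proctree_lock, PWAIT,
			    "reapst", 0);
	}

The one case the worker cannot handle is the calling process itself,
which cannot wait for its own suspension; for that, the loop above
single-threads in place with SINGLE_NO_EXIT, temporarily dropping
proctree_lock.
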
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -365,14 +365,13 @@
}
ksiginfo_t *
-ksiginfo_alloc(int wait)
+ksiginfo_alloc(int mwait)
{
- int flags;
+ MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);
- flags = M_ZERO | (wait ? M_WAITOK : M_NOWAIT);
- if (ksiginfo_zone != NULL)
- return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
- return (NULL);
+ if (ksiginfo_zone == NULL)
+ return (NULL);
+ return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
}
void
@@ -381,14 +380,14 @@
uma_zfree(ksiginfo_zone, ksi);
}
-static __inline int
+static __inline bool
ksiginfo_tryfree(ksiginfo_t *ksi)
{
- if (!(ksi->ksi_flags & KSI_EXT)) {
+ if ((ksi->ksi_flags & KSI_EXT) == 0) {
uma_zfree(ksiginfo_zone, ksi);
- return (1);
+ return (true);
}
- return (0);
+ return (false);
}
void
@@ -513,7 +512,7 @@
if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
signal_overflow++;
ret = EAGAIN;
- } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
+ } else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
signal_alloc_fail++;
ret = EAGAIN;
} else {
@@ -2265,7 +2264,7 @@
* IEEE Std 1003.1-2001: return success when killing a zombie.
*/
if (p->p_state == PRS_ZOMBIE) {
- if (ksi && (ksi->ksi_flags & KSI_INS))
+ if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
ksiginfo_tryfree(ksi);
return (ret);
}
@@ -2295,7 +2294,7 @@
SDT_PROBE3(proc, , , signal__discard, td, p, sig);
mtx_unlock(&ps->ps_mtx);
- if (ksi && (ksi->ksi_flags & KSI_INS))
+ if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
ksiginfo_tryfree(ksi);
return (ret);
} else {
@@ -2328,7 +2327,7 @@
if ((prop & SIGPROP_TTYSTOP) != 0 &&
(p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
action == SIG_DFL) {
- if (ksi && (ksi->ksi_flags & KSI_INS))
+ if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
ksiginfo_tryfree(ksi);
return (ret);
}
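
In kern_sig.c the allocator's argument becomes a malloc(9) wait flag
validated with MPASS, and ksiginfo_tryfree() becomes a bool predicate.
The KSI_EXT test is what keeps externally owned siginfo (for example a
process's preallocated p_ksi, flagged KSI_EXT | KSI_INS) from being
returned to the UMA zone on the early-return paths above. A sketch of
the calling convention on those paths:

	/*
	 * Paths that consume a signal without queuing it (zombie
	 * target, discarded signal, orphaned-pgrp stop) release an
	 * INS-style ksiginfo only if the zone actually owns it:
	 */
	if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
		ksiginfo_tryfree(ksi);	/* no-op when KSI_EXT is set */
	return (ret);
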
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -99,7 +99,7 @@
"struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
"struct proc KBI p_filemon");
-_Static_assert(offsetof(struct proc, p_comm) == 0x3e4,
+_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
"struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
"struct proc KBI p_emuldata");
@@ -119,9 +119,9 @@
"struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x270,
"struct proc KBI p_filemon");
-_Static_assert(offsetof(struct proc, p_comm) == 0x288,
+_Static_assert(offsetof(struct proc, p_comm) == 0x284,
"struct proc KBI p_comm");
-_Static_assert(offsetof(struct proc, p_emuldata) == 0x314,
+_Static_assert(offsetof(struct proc, p_emuldata) == 0x310,
"struct proc KBI p_emuldata");
#endif
@@ -468,7 +468,7 @@
{
sigqueue_init(&p->p_sigqueue, p);
- p->p_ksi = ksiginfo_alloc(1);
+ p->p_ksi = ksiginfo_alloc(M_WAITOK);
if (p->p_ksi != NULL) {
/* XXX p_ksi may be null if ksiginfo zone is not ready */
p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
@@ -1243,12 +1243,8 @@
else
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
- if (mode == SINGLE_ALLPROC) {
+ if (mode == SINGLE_ALLPROC)
p->p_flag |= P_TOTAL_STOP;
- thread_lock(td);
- td->td_flags |= TDF_DOING_SA;
- thread_unlock(td);
- }
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
@@ -1335,11 +1331,6 @@
}
}
PROC_SUNLOCK(p);
- if (mode == SINGLE_ALLPROC) {
- thread_lock(td);
- td->td_flags &= ~TDF_DOING_SA;
- thread_unlock(td);
- }
return (0);
}
@@ -1626,11 +1617,10 @@
if (!P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
- if (TD_IS_SUSPENDED(td) && (td->td_flags &
- TDF_DOING_SA) == 0) {
+ if (TD_IS_SUSPENDED(td))
wakeup_swapper |= thread_unsuspend_one(td, p,
true);
- } else
+ else
thread_unlock(td);
}
} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
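
The kern_thread.c hunks are the other half of the TDF_DOING_SA
removal: the ALLPROC single-threader is now a taskqueue thread rather
than a potentially suspendable member of some process, so
thread_unsuspend() no longer needs to skip it. The KBI _Static_asserts
record the 4-byte shift from deleting "int p_singlethr" out of struct
proc; note that on amd64 p_emuldata keeps its 0x4c8 offset, presumably
because alignment padding absorbs the hole:

	/* amd64: p_comm moves down by the size of the deleted int ... */
	_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
	    "struct proc KBI p_comm");
	/* ... while the next 8-byte-aligned member stays put. */
	_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
	    "struct proc KBI p_emuldata");
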
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -388,8 +388,9 @@
}
/*
- * Sets a timeout that will remove the current thread from the specified
- * sleep queue after timo ticks if the thread has not already been awakened.
+ * Sets a timeout that will remove the current thread from the
+ * specified sleep queue at the specified time if the thread has not
+ * already been awakened. Flags are from C_* (callout) namespace.
*/
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
@@ -482,7 +483,7 @@
* Lock the per-process spinlock prior to dropping the
* PROC_LOCK to avoid a signal delivery race.
* PROC_LOCK, PROC_SLOCK, and thread_lock() are
- * currently held in tdsendsignal().
+ * currently held in tdsendsignal() and thread_single().
*/
PROC_SLOCK(p);
mtx_lock_spin(&sc->sc_lock);
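
The subr_sleepqueue.c changes are comment-only: the timeout argument
is an sbintime_t shaped by callout C_* flags rather than a tick count,
and thread_single() can now hold the same lock set as tdsendsignal()
when racing a sleeping thread. A hedged usage sketch for the flags,
assuming a bare sleepqueue consumer:

	static int dummy;		/* any kernel address can be a wchan */
	const void *wchan = &dummy;
	int error;

	sleepq_lock(wchan);
	sleepq_add(wchan, NULL, "examp", SLEEPQ_SLEEP, 0);
	/* One second, relative; C_ABSOLUTE would make sbt a deadline. */
	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, 0);
	error = sleepq_timedwait(wchan, 0);
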
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -470,15 +470,15 @@
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
#define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */
#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
-#define TDF_DOING_SA 0x00800000 /* Doing SINGLE_ALLPROC, do not unsuspend me */
+#define TDF_UNUSED6 0x00800000 /* Available */
#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
-#define TDF_UNUSED6 0x10000000 /* Available */
-#define TDF_UNUSED7 0x20000000 /* Available */
-#define TDF_UNUSED8 0x40000000 /* Available */
-#define TDF_UNUSED9 0x80000000 /* Available */
+#define TDF_UNUSED7 0x10000000 /* Available */
+#define TDF_UNUSED8 0x20000000 /* Available */
+#define TDF_UNUSED9 0x40000000 /* Available */
+#define TDF_UNUSED10 0x80000000 /* Available */
enum {
TDA_AST = 0, /* Special: call all non-flagged AST handlers */
@@ -720,8 +720,6 @@
int p_pendingexits; /* (c) Count of pending thread exits. */
struct filemon *p_filemon; /* (c) filemon-specific data. */
int p_pdeathsig; /* (c) Signal from parent on exit. */
- int p_singlethr; /* (c) Count of threads doing
- external thread_single() */
/* End area that is zeroed on creation. */
#define p_endzero p_magic
diff --git a/sys/sys/signalvar.h b/sys/sys/signalvar.h
--- a/sys/sys/signalvar.h
+++ b/sys/sys/signalvar.h
@@ -384,7 +384,7 @@
void execsigs(struct proc *p);
void gsignal(int pgid, int sig, ksiginfo_t *ksi);
void killproc(struct proc *p, const char *why);
-ksiginfo_t * ksiginfo_alloc(int wait);
+ksiginfo_t *ksiginfo_alloc(int mwait);
void ksiginfo_free(ksiginfo_t *ksi);
int pksignal(struct proc *p, int sig, ksiginfo_t *ksi);
void pgsigio(struct sigio **sigiop, int sig, int checkctty);
