Changeset View
Standalone View
sys/kern/kern_sig.c
Show First 20 Lines • Show All 108 Lines • ▼ Show 20 Lines | static int killpg1(struct thread *td, int sig, int pgid, int all, | ||||
ksiginfo_t *ksi); | ksiginfo_t *ksi); | ||||
static int issignal(struct thread *td); | static int issignal(struct thread *td); | ||||
static int sigprop(int sig); | static int sigprop(int sig); | ||||
static void tdsigwakeup(struct thread *, int, sig_t, int); | static void tdsigwakeup(struct thread *, int, sig_t, int); | ||||
static int sig_suspend_threads(struct thread *, struct proc *, int); | static int sig_suspend_threads(struct thread *, struct proc *, int); | ||||
static int filt_sigattach(struct knote *kn); | static int filt_sigattach(struct knote *kn); | ||||
static void filt_sigdetach(struct knote *kn); | static void filt_sigdetach(struct knote *kn); | ||||
static int filt_signal(struct knote *kn, long hint); | static int filt_signal(struct knote *kn, long hint); | ||||
static struct thread *sigtd(struct proc *p, int sig, int prop); | static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock); | ||||
static void sigqueue_start(void); | static void sigqueue_start(void); | ||||
static uma_zone_t ksiginfo_zone = NULL; | static uma_zone_t ksiginfo_zone = NULL; | ||||
struct filterops sig_filtops = { | struct filterops sig_filtops = { | ||||
.f_isfd = 0, | .f_isfd = 0, | ||||
.f_attach = filt_sigattach, | .f_attach = filt_sigattach, | ||||
.f_detach = filt_sigdetach, | .f_detach = filt_sigdetach, | ||||
.f_event = filt_signal, | .f_event = filt_signal, | ||||
▲ Show 20 Lines • Show All 107 Lines • ▼ Show 20 Lines | static int sigproptbl[NSIG] = { | ||||
[SIGVTALRM] = SIGPROP_KILL, | [SIGVTALRM] = SIGPROP_KILL, | ||||
[SIGPROF] = SIGPROP_KILL, | [SIGPROF] = SIGPROP_KILL, | ||||
[SIGWINCH] = SIGPROP_IGNORE, | [SIGWINCH] = SIGPROP_IGNORE, | ||||
[SIGINFO] = SIGPROP_IGNORE, | [SIGINFO] = SIGPROP_IGNORE, | ||||
[SIGUSR1] = SIGPROP_KILL, | [SIGUSR1] = SIGPROP_KILL, | ||||
[SIGUSR2] = SIGPROP_KILL, | [SIGUSR2] = SIGPROP_KILL, | ||||
}; | }; | ||||
static void reschedule_signals(struct proc *p, sigset_t block, int flags); | sigset_t fastblock_mask; | ||||
cem: `__read_mostly`? | |||||
/*
 * Late (SI_SUB_P1003_1B) initialization for the signal-queue subsystem:
 * create the ksiginfo UMA zone, publish the POSIX realtime-signal
 * configuration values, and precompute fastblock_mask, the set of
 * signals deferrable by the userspace fast-sigblock mechanism (all
 * signals except those that can never be masked, e.g. SIGKILL/SIGSTOP).
 */
static void
sigqueue_start(void)
{
	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Reserve items so signal queueing works even under memory pressure. */
	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
	/* Start from "all signals" and strip the unmaskable ones. */
	SIGFILLSET(fastblock_mask);
	SIG_CANTMASK(fastblock_mask);
}
/*
 * Allocate a zeroed ksiginfo_t from the ksiginfo zone.
 *
 * wait != 0 allows the allocation to sleep; wait == 0 adds M_NOWAIT so
 * the call is safe from contexts that may not sleep.  Returns NULL if
 * the zone has not been created yet (very early boot) or, with
 * M_NOWAIT, if no memory is available.
 */
ksiginfo_t *
ksiginfo_alloc(int wait)
{
	int flags;

	flags = M_ZERO;
	if (! wait)
		flags |= M_NOWAIT;
	if (ksiginfo_zone != NULL)
		return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
	return (NULL);
}
void | void | ||||
▲ Show 20 Lines • Show All 1,725 Lines • ▼ Show 20 Lines | |||||
* caught immediately, deliver it with correct code. Otherwise, post it | * caught immediately, deliver it with correct code. Otherwise, post it | ||||
* normally. | * normally. | ||||
*/ | */ | ||||
/*
 * Deliver a synchronously generated (trap/fault) signal to the current
 * thread.  If the signal is caught, unmasked and the process is not
 * traced, invoke the handler immediately with the correct siginfo code;
 * otherwise fall back to normal posting via tdsendsignal().
 *
 * The thread's effective mask is widened with fastblock_mask while the
 * userspace fast-sigblock counter (td_sigblock_val) is non-zero, so a
 * fault inside a fast-blocked section takes the "blocked" path.
 */
void
trapsignal(struct thread *td, ksiginfo_t *ksi)
{
	struct sigacts *ps;
	struct proc *p;
	sigset_t sigmask;
	int code, sig;

	p = td->td_proc;
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	KASSERT(_SIG_VALID(sig), ("invalid signal"));

	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/*
	 * Compute the effective mask: the thread's own mask, plus all
	 * fast-blockable signals if userspace currently holds the fast
	 * sigblock.  Note td_sigmask itself is left untouched here.
	 */
	sigmask = td->td_sigmask;
	if (td->td_sigblock_val != 0)
		SIGSETOR(sigmask, fastblock_mask);
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(sigmask, sig)) {
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		/* Push the handler frame directly onto the user stack. */
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
		    ksi, &td->td_sigmask);
		postsig_done(sig, td, ps);
		mtx_unlock(&ps->ps_mtx);
	} else {
		/*
		 * Avoid a possible infinite loop if the thread
		 * masking the signal or process is ignoring the
		 * signal.
		 */
		if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
			/*
			 * Force default disposition so the fault kills
			 * the process; also drop any fast sigblock.
			 */
			SIGDELSET(td->td_sigmask, sig);
			SIGDELSET(ps->ps_sigcatch, sig);
			SIGDELSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
			td->td_sigblock_val = 0;
		}
		mtx_unlock(&ps->ps_mtx);
		p->p_sig = sig;		/* XXX to verify code */
		tdsendsignal(p, td, sig, ksi);
	}
	PROC_UNLOCK(p);
}
/*
 * Select a thread within process p to which signal sig should be
 * directed.  Prefers the current thread when it belongs to p and does
 * not block the signal; otherwise picks the first thread that has the
 * signal unmasked, falling back to the first thread in the process.
 *
 * fast_sigblock: when true (caller is delivering to curproc), also
 * avoid choosing curthread while its userspace fast-sigblock counter
 * is non-zero.  td_sigblock_val is only meaningful for curthread — it
 * is refreshed on syscall entry/AST and may be stale for other
 * threads — hence it is only consulted when td == curthread.
 */
static struct thread *
sigtd(struct proc *p, int sig, bool fast_sigblock)
{
	struct thread *td, *signal_td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	MPASS(!fast_sigblock || p == curproc);

	/*
	 * Check if current thread can handle the signal without
	 * switching context to another thread.
	 */
	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
	    (!fast_sigblock || curthread->td_sigblock_val == 0))
		return (curthread);
	signal_td = NULL;
	FOREACH_THREAD_IN_PROC(p, td) {
		if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
		    td != curthread || td->td_sigblock_val == 0)) {
			signal_td = td;
			break;
		}
	}
	/* No thread has it unmasked; deliver to the first thread anyway. */
	if (signal_td == NULL)
		signal_td = FIRST_THREAD_IN_PROC(p);
	return (signal_td);
}
▲ Show 20 Lines • Show All 97 Lines • ▼ Show 20 Lines | if (p->p_state == PRS_ZOMBIE) { | ||||
return (ret); | return (ret); | ||||
} | } | ||||
ps = p->p_sigacts; | ps = p->p_sigacts; | ||||
KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); | KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig); | ||||
prop = sigprop(sig); | prop = sigprop(sig); | ||||
if (td == NULL) { | if (td == NULL) { | ||||
td = sigtd(p, sig, prop); | td = sigtd(p, sig, false); | ||||
sigqueue = &p->p_sigqueue; | sigqueue = &p->p_sigqueue; | ||||
} else | } else | ||||
sigqueue = &td->td_sigqueue; | sigqueue = &td->td_sigqueue; | ||||
SDT_PROBE3(proc, , , signal__send, td, p, sig); | SDT_PROBE3(proc, , , signal__send, td, p, sig); | ||||
/* | /* | ||||
* If the signal is being ignored, | * If the signal is being ignored, | ||||
▲ Show 20 Lines • Show All 378 Lines • ▼ Show 20 Lines | |||||
* that case as well, however it will be deferred until it can be handled. | * that case as well, however it will be deferred until it can be handled. | ||||
*/ | */ | ||||
int | int | ||||
ptracestop(struct thread *td, int sig, ksiginfo_t *si) | ptracestop(struct thread *td, int sig, ksiginfo_t *si) | ||||
{ | { | ||||
struct proc *p = td->td_proc; | struct proc *p = td->td_proc; | ||||
struct thread *td2; | struct thread *td2; | ||||
ksiginfo_t ksi; | ksiginfo_t ksi; | ||||
int prop; | |||||
PROC_LOCK_ASSERT(p, MA_OWNED); | PROC_LOCK_ASSERT(p, MA_OWNED); | ||||
KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process")); | KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process")); | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, | ||||
&p->p_mtx.lock_object, "Stopping for traced signal"); | &p->p_mtx.lock_object, "Stopping for traced signal"); | ||||
td->td_xsig = sig; | td->td_xsig = sig; | ||||
▲ Show 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | stopme: | ||||
} else if (td->td_xsig != 0) { | } else if (td->td_xsig != 0) { | ||||
/* | /* | ||||
* If parent wants us to take a new signal, then it will leave | * If parent wants us to take a new signal, then it will leave | ||||
* it in td->td_xsig; otherwise we just look for signals again. | * it in td->td_xsig; otherwise we just look for signals again. | ||||
*/ | */ | ||||
ksiginfo_init(&ksi); | ksiginfo_init(&ksi); | ||||
ksi.ksi_signo = td->td_xsig; | ksi.ksi_signo = td->td_xsig; | ||||
ksi.ksi_flags |= KSI_PTRACE; | ksi.ksi_flags |= KSI_PTRACE; | ||||
prop = sigprop(td->td_xsig); | td2 = sigtd(p, td->td_xsig, false); | ||||
td2 = sigtd(p, td->td_xsig, prop); | |||||
tdsendsignal(p, td2, td->td_xsig, &ksi); | tdsendsignal(p, td2, td->td_xsig, &ksi); | ||||
if (td != td2) | if (td != td2) | ||||
return (0); | return (0); | ||||
} | } | ||||
return (td->td_xsig); | return (td->td_xsig); | ||||
} | } | ||||
static void | void | ||||
reschedule_signals(struct proc *p, sigset_t block, int flags) | reschedule_signals(struct proc *p, sigset_t block, int flags) | ||||
{ | { | ||||
struct sigacts *ps; | struct sigacts *ps; | ||||
struct thread *td; | struct thread *td; | ||||
int sig; | int sig; | ||||
PROC_LOCK_ASSERT(p, MA_OWNED); | PROC_LOCK_ASSERT(p, MA_OWNED); | ||||
ps = p->p_sigacts; | ps = p->p_sigacts; | ||||
mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ? | mtx_assert(&ps->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0 ? | ||||
MA_OWNED : MA_NOTOWNED); | MA_OWNED : MA_NOTOWNED); | ||||
if (SIGISEMPTY(p->p_siglist)) | if (SIGISEMPTY(p->p_siglist)) | ||||
return; | return; | ||||
SIGSETAND(block, p->p_siglist); | SIGSETAND(block, p->p_siglist); | ||||
while ((sig = sig_ffs(&block)) != 0) { | while ((sig = sig_ffs(&block)) != 0) { | ||||
SIGDELSET(block, sig); | SIGDELSET(block, sig); | ||||
td = sigtd(p, sig, 0); | td = sigtd(p, sig, (flags & SIGPROCMASK_FASTBLK) != 0); | ||||
Not Done Inline ActionsThis version doesn't modify sigtd to be vaguely aware of fastblock threads, right? It seems like we may end up just burning CPU rescheduling signals onto the same thread. sigtd will by default pick curthread if it matches p and doesn't slow-block (td_sigmask) the signal. If we do not modify sigtd, we probably want to wrap the call to reschedule_signals with <save td_sigmask>; SIGFILLSET(td->td_sigmask); and <restore td_sigmask> to prevent fastblock threads from scheduling masked signals onto themselves preferentially. cem: This version doesn't modify `sigtd` to be vaguely aware of fastblock threads, right? It seems… | |||||
signotify(td); | signotify(td); | ||||
if (!(flags & SIGPROCMASK_PS_LOCKED)) | if (!(flags & SIGPROCMASK_PS_LOCKED)) | ||||
mtx_lock(&ps->ps_mtx); | mtx_lock(&ps->ps_mtx); | ||||
if (p->p_flag & P_TRACED || | if (p->p_flag & P_TRACED || | ||||
(SIGISMEMBER(ps->ps_sigcatch, sig) && | (SIGISMEMBER(ps->ps_sigcatch, sig) && | ||||
!SIGISMEMBER(td->td_sigmask, sig))) | !SIGISMEMBER(td->td_sigmask, sig))) | ||||
tdsigwakeup(td, sig, SIG_CATCH, | tdsigwakeup(td, sig, SIG_CATCH, | ||||
(SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : | (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : | ||||
▲ Show 20 Lines • Show All 142 Lines • ▼ Show 20 Lines | for (;;) { | ||||
SIGSETOR(sigpending, p->p_sigqueue.sq_signals); | SIGSETOR(sigpending, p->p_sigqueue.sq_signals); | ||||
SIGSETNAND(sigpending, td->td_sigmask); | SIGSETNAND(sigpending, td->td_sigmask); | ||||
if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags & | if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags & | ||||
(TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) | (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY) | ||||
SIG_STOPSIGMASK(sigpending); | SIG_STOPSIGMASK(sigpending); | ||||
if (SIGISEMPTY(sigpending)) /* no signal to send */ | if (SIGISEMPTY(sigpending)) /* no signal to send */ | ||||
return (0); | return (0); | ||||
/* | |||||
* Do fast sigblock if requested by usermode. Since | |||||
* we do know that there was a signal pending at this | |||||
* point, set the FAST_SIGBLOCK_PEND as indicator for | |||||
* usermode to perform a dummy call to | |||||
* FAST_SIGBLOCK_UNBLOCK, which causes immediate | |||||
* delivery of postponed pending signal. | |||||
*/ | |||||
if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { | |||||
if (td->td_sigblock_val != 0) | |||||
SIGSETNAND(sigpending, fastblock_mask); | |||||
if (SIGISEMPTY(sigpending)) { | |||||
td->td_pflags |= TDP_SIGFASTPENDING; | |||||
return (0); | |||||
} | |||||
} | |||||
if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED && | if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED && | ||||
(p->p_flag2 & P2_PTRACE_FSTP) != 0 && | (p->p_flag2 & P2_PTRACE_FSTP) != 0 && | ||||
SIGISMEMBER(sigpending, SIGSTOP)) { | SIGISMEMBER(sigpending, SIGSTOP)) { | ||||
/* | /* | ||||
* If debugger just attached, always consume | * If debugger just attached, always consume | ||||
* SIGSTOP from ptrace(PT_ATTACH) first, to | * SIGSTOP from ptrace(PT_ATTACH) first, to | ||||
* execute the debugger attach ritual in | * execute the debugger attach ritual in | ||||
* order. | * order. | ||||
▲ Show 20 Lines • Show All 1,051 Lines • ▼ Show 20 Lines | sig_drop_caught(struct proc *p) | ||||
PROC_LOCK_ASSERT(p, MA_OWNED); | PROC_LOCK_ASSERT(p, MA_OWNED); | ||||
mtx_assert(&ps->ps_mtx, MA_OWNED); | mtx_assert(&ps->ps_mtx, MA_OWNED); | ||||
while (SIGNOTEMPTY(ps->ps_sigcatch)) { | while (SIGNOTEMPTY(ps->ps_sigcatch)) { | ||||
sig = sig_ffs(&ps->ps_sigcatch); | sig = sig_ffs(&ps->ps_sigcatch); | ||||
sigdflt(ps, sig); | sigdflt(ps, sig); | ||||
if ((sigprop(sig) & SIGPROP_IGNORE) != 0) | if ((sigprop(sig) & SIGPROP_IGNORE) != 0) | ||||
sigqueue_delete_proc(p, sig); | sigqueue_delete_proc(p, sig); | ||||
} | } | ||||
} | |||||
int | |||||
sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap) | |||||
{ | |||||
struct proc *p; | |||||
int error, res; | |||||
uint32_t oldval; | |||||
error = 0; | |||||
switch (uap->cmd) { | |||||
case SIGFASTBLOCK_SETPTR: | |||||
if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { | |||||
error = EBUSY; | |||||
break; | |||||
} | |||||
if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) { | |||||
error = EINVAL; | |||||
break; | |||||
} | |||||
td->td_pflags |= TDP_SIGFASTBLOCK; | |||||
td->td_sigblock_ptr = uap->ptr; | |||||
break; | |||||
case SIGFASTBLOCK_UNBLOCK: | |||||
if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) { | |||||
error = EINVAL; | |||||
break; | |||||
} | |||||
again: | |||||
res = casueword32(td->td_sigblock_ptr, SIGFASTBLOCK_PEND, | |||||
&oldval, 0); | |||||
if (res == -1) { | |||||
error = EFAULT; | |||||
break; | |||||
} | |||||
if (res == 1) { | |||||
if (oldval != SIGFASTBLOCK_PEND) { | |||||
error = EBUSY; | |||||
break; | |||||
} | |||||
error = thread_check_susp(td, false); | |||||
if (error != 0) | |||||
break; | |||||
goto again; | |||||
} | |||||
td->td_sigblock_val = 0; | |||||
/* | |||||
* Rely on normal ast mechanism to deliver pending | |||||
* signals to current thread. But notify others about | |||||
* fake unblock. | |||||
*/ | |||||
p = td->td_proc; | |||||
if (error == 0 && p->p_numthreads != 1) { | |||||
PROC_LOCK(p); | |||||
reschedule_signals(p, td->td_sigmask, 0); | |||||
PROC_UNLOCK(p); | |||||
} | |||||
break; | |||||
case SIGFASTBLOCK_UNSETPTR: | |||||
if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) { | |||||
error = EINVAL; | |||||
break; | |||||
} | |||||
res = fueword32(td->td_sigblock_ptr, &oldval); | |||||
if (res == -1) { | |||||
error = EFAULT; | |||||
break; | |||||
} | |||||
if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) { | |||||
error = EBUSY; | |||||
break; | |||||
} | |||||
td->td_pflags &= ~TDP_SIGFASTBLOCK; | |||||
td->td_sigblock_val = 0; | |||||
break; | |||||
default: | |||||
error = EINVAL; | |||||
break; | |||||
} | |||||
return (error); | |||||
} | |||||
/*
 * Refresh td_sigblock_val from the thread's registered userspace fast
 * sigblock word (called on syscall entry/AST).  No-op if the thread
 * never registered a word.  On fetch failure, tear down the mechanism
 * and post SIGSEGV via fetch_sigfastblock_failed() (write == false:
 * this is a read access).  Control flags are stripped so the cached
 * value is a pure block count.
 */
void
fetch_sigfastblock(struct thread *td)
{

	if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
		return;
	if (fueword32(td->td_sigblock_ptr, &td->td_sigblock_val) == -1) {
		fetch_sigfastblock_failed(td, false);
		return;
	}
	td->td_sigblock_val &= ~SIGFASTBLOCK_FLAGS;
}
/*
 * Handle an inaccessible fast-sigblock user word: disable the
 * mechanism for the thread and deliver SIGSEGV as if the thread itself
 * had faulted on the address.  'write' selects the siginfo code
 * (SEGV_ACCERR for a failed write, SEGV_MAPERR for a failed read).
 */
void
fetch_sigfastblock_failed(struct thread *td, bool write)
{
	ksiginfo_t ksi;

	/*
	 * Prevent further fetches and SIGSEGVs, allowing thread to
	 * issue syscalls despite corruption.
	 */
	td->td_pflags &= ~TDP_SIGFASTBLOCK;
	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = SIGSEGV;
	ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
	ksi.ksi_addr = td->td_sigblock_ptr;
	trapsignal(td, &ksi);
}
__read_mostly?