
D49678.id153637.diff

diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -3286,6 +3286,49 @@
}
}
+static void
+sig_prep_first_dbg_stop(struct thread *td, struct proc *p, sigset_t *sp)
+{
+ MPASS(td->td_proc == p);
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
+ (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
+ SIGISMEMBER(*sp, SIGSTOP)) {
+ /*
+ * If debugger just attached, always consume SIGSTOP
+ * from ptrace(PT_ATTACH) first, to execute the
+ * debugger attach ritual in order.
+ */
+ td->td_dbgflags |= TDB_FSTP;
+ SIGEMPTYSET(*sp);
+ SIGADDSET(*sp, SIGSTOP);
+ }
+}
+
+bool
+sig_handle_sigstop(struct thread *td)
+{
+ struct proc *p;
+ sigset_t sigpending;
+ enum sigstatus res;
+
+ p = td->td_proc;
+ PROC_LOCK(p);
+ sigpending = td->td_sigqueue.sq_signals;
+ SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
+ sig_prep_first_dbg_stop(td, p, &sigpending);
+ if (SIGISMEMBER(sigpending, SIGSTOP)) {
+ mtx_lock(&p->p_sigacts->ps_mtx);
+ res = sigprocess(td, SIGSTOP);
+ mtx_unlock(&p->p_sigacts->ps_mtx);
+ } else {
+ res = SIGSTATUS_IGNORE;
+ }
+ PROC_UNLOCK(p);
+ return (res == SIGSTATUS_HANDLED);
+}
+
/*
* If the current process has received a signal (should be caught or cause
* termination, should interrupt current syscall), return the signal number.
@@ -3336,19 +3379,7 @@
}
}
- if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
- (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
- SIGISMEMBER(sigpending, SIGSTOP)) {
- /*
- * If debugger just attached, always consume
- * SIGSTOP from ptrace(PT_ATTACH) first, to
- * execute the debugger attach ritual in
- * order.
- */
- td->td_dbgflags |= TDB_FSTP;
- SIGEMPTYSET(sigpending);
- SIGADDSET(sigpending, SIGSTOP);
- }
+ sig_prep_first_dbg_stop(td, p, &sigpending);
SIG_FOREACH(sig, &sigpending) {
switch (sigprocess(td, sig)) {
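
Not part of the patch, but useful context: the "debugger attach ritual" that sig_prep_first_dbg_stop() keeps in order is visible from userland. ptrace(PT_ATTACH) sends SIGSTOP to the target, and the tracer must collect that stop with waitpid() before issuing further requests. A minimal illustrative sketch, assuming a FreeBSD userland and a target PID on the command line:

/*
 * Illustrative tracer showing the attach ordering the kernel comment
 * calls the "debugger attach ritual": PT_ATTACH sends SIGSTOP to the
 * target, and the tracer must collect that stop with waitpid() before
 * doing anything else with the process.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
        pid_t pid;
        int status;

        if (argc != 2)
                errx(1, "usage: attach <pid>");
        pid = (pid_t)strtol(argv[1], NULL, 10);

        if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
                err(1, "PT_ATTACH");

        /* The SIGSTOP generated by PT_ATTACH must be reaped first. */
        if (waitpid(pid, &status, WUNTRACED) == -1)
                err(1, "waitpid");
        if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGSTOP)
                errx(1, "unexpected wait status %#x", status);
        printf("attached, target stopped by SIGSTOP\n");

        /* Detach; addr of (caddr_t)1 resumes the target where it stopped. */
        if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
                err(1, "PT_DETACH");
        return (0);
}
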
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -143,9 +143,11 @@
#endif
} __aligned(CACHE_LINE_SIZE);
-#ifdef SLEEPQUEUE_PROFILING
+
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "sleepq profiling");
+ "sleepq implementation knobs");
+
+#ifdef SLEEPQUEUE_PROFILING
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"sleepq chain stats");
@@ -159,6 +161,11 @@
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;
+static bool sleepq_sigstop_transparent = true;
+SYSCTL_BOOL(_debug_sleepq, OID_AUTO, sigstop_transparent, CTLFLAG_RW,
+ &sleepq_sigstop_transparent, 0,
+ "hide wakes from SIGSTOP on interruptible sleeps");
+
/*
* Prototypes for non-exported routines.
*/
@@ -411,8 +418,11 @@
thread_lock(td);
callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
thread_unlock(td);
+ td->td_sqint_pr1 = pr1;
+ td->td_sqint_flags = flags;
+ td->td_sqint_cpu = PCPU_GET(cpuid);
callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
- sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
+ sleepq_timeout, td, td->td_sqint_cpu, flags | C_PRECALC |
C_DIRECT_EXEC);
}
@@ -659,6 +669,54 @@
sleepq_switch(wchan, pri);
}
+static bool
+sleepq_handle_sigstop(struct thread *td, const void *wchan,
+ struct lock_object *lock, const char *wmesg, int queue, sbintime_t tmo,
+ int flags)
+{
+ if (!sleepq_sigstop_transparent)
+ return (false);
+
+ if ((td->td_flags & TDF_SBDRY) != 0 ||
+ !(td_ast_pending(td, TDA_SIG) ||
+ td_ast_pending(td, TDA_SUSPEND)))
+ return (false);
+ if (sig_handle_sigstop(td)) {
+ /*
+ * The thread's interruptible sleep was aborted in
+ * order to suspend the process. Propagating the
+ * aborted sleep to the userspace boundary causes
+ * spurious EINTR returns from syscalls. Instead, we
+ * can handle signal processing here, noting that the
+ * place where interruptible non-boundary sleep is
+ * allowed is not much different from the userret()
+ * place.
+ *
+ * Note that we check the current process and thread
+ * states without locks. Also, only stops can be
+ * handled this way, since it is not safe to exit a
+ * sleeping thread, because it might own resources
+ * needing destructors.
+ */
+ if (tmo != 0 && tmo <= sbinuptime())
+ return (false);
+ sleepq_lock(wchan);
+ td->td_intrval = 0;
+ sleepq_add(wchan, lock, wmesg, flags | SLEEPQ_INTERRUPTIBLE,
+ queue);
+ if (tmo != 0) {
+ td->td_sleeptimo = tmo;
+ callout_reset_sbt_on(&td->td_slpcallout,
+ tmo, td->td_sqint_pr1, sleepq_timeout,
+ td, td->td_sqint_cpu, td->td_sqint_flags |
+ C_PRECALC | C_DIRECT_EXEC);
+ }
+ return (true);
+ }
+ return (false);
+}
+
/*
* Block the current thread until it is awakened from its sleep queue
* or it is interrupted by a signal.
@@ -666,12 +724,29 @@
int
sleepq_wait_sig(const void *wchan, int pri)
{
- int rcatch;
+ struct thread *td;
+ const char *wmesg;
+ struct sleepqueue *sq;
+ struct lock_object *lock;
+ int flags, queue, rcatch, rvals;
- rcatch = sleepq_catch_signals(wchan, pri);
- if (rcatch)
- return (rcatch);
- return (sleepq_check_signals());
+ td = curthread;
+ wmesg = td->td_wmesg;
+ queue = td->td_sqqueue;
+ sq = sleepq_lookup(wchan);
+ lock = sq->sq_lock;
+ flags = sq->sq_type;
+
+ for (;;) {
+ rcatch = sleepq_catch_signals(wchan, pri);
+ if (rcatch != 0)
+ return (rcatch);
+ rvals = sleepq_check_signals();
+ if (rvals != 0 && sleepq_handle_sigstop(td, wchan, lock,
+ wmesg, queue, 0, flags))
+ continue;
+ return (rvals);
+ }
}
/*
@@ -699,17 +774,36 @@
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
- int rcatch, rvalt, rvals;
+ struct thread *td;
+ const char *wmesg;
+ struct sleepqueue *sq;
+ struct lock_object *lock;
+ sbintime_t tmo;
+ int flags, queue, rcatch, rvalt, rvals;
- rcatch = sleepq_catch_signals(wchan, pri);
- /* We must always call check_timeout() to clear sleeptimo. */
- rvalt = sleepq_check_timeout();
- rvals = sleepq_check_signals();
- if (rcatch)
- return (rcatch);
- if (rvals)
- return (rvals);
- return (rvalt);
+ td = curthread;
+ wmesg = td->td_wmesg;
+ queue = td->td_sqqueue;
+ tmo = td->td_sleeptimo;
+ sq = sleepq_lookup(wchan);
+ lock = sq->sq_lock;
+ flags = sq->sq_type;
+
+ for (;;) {
+ rcatch = sleepq_catch_signals(wchan, pri);
+ /* We must always call check_timeout() to clear sleeptimo. */
+ rvalt = sleepq_check_timeout();
+ rvals = sleepq_check_signals();
+ if (rcatch != 0)
+ return (rcatch);
+ if (rvals != 0) {
+ if (sleepq_handle_sigstop(td, wchan, lock, wmesg,
+ queue, tmo, flags))
+ continue;
+ return (rvals);
+ }
+ return (rvalt);
+ }
}
/*
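
The re-sleep loop in sleepq_wait_sig()/sleepq_timedwait_sig() above is meant to keep process stops transparent: without it, aborting an interruptible sleep to suspend the process can surface as a spurious EINTR from the interrupted syscall. A hedged test sketch (the program is illustrative, not part of the review; the sysctl name debug.sleepq.sigstop_transparent is taken from the patch; whether EINTR is observable at all depends on the kernel version and the sleep site): stop and continue a child blocked in read(2) on an empty pipe, then check whether the read completed normally or returned EINTR.

/*
 * Illustrative test: a child blocks in read(2) on an empty pipe, the
 * parent stops and continues it, then wakes it with a write.  With
 * debug.sleepq.sigstop_transparent=1 the read is expected to complete
 * normally rather than fail with EINTR.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/wait.h>

#include <err.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        bool transparent;
        size_t len;
        pid_t pid;
        int fds[2], status;
        char c;

        /* Knob added by this patch; the sysctl is absent on unpatched kernels. */
        len = sizeof(transparent);
        if (sysctlbyname("debug.sleepq.sigstop_transparent",
            &transparent, &len, NULL, 0) == 0)
                printf("sigstop_transparent: %d\n", transparent);

        if (pipe(fds) == -1)
                err(1, "pipe");
        pid = fork();
        if (pid == -1)
                err(1, "fork");
        if (pid == 0) {
                /* Child: block until the parent writes a byte. */
                close(fds[1]);
                if (read(fds[0], &c, 1) == -1 && errno == EINTR)
                        _exit(2);       /* sleep abort leaked as EINTR */
                _exit(0);
        }
        close(fds[0]);
        sleep(1);                       /* let the child block in read(2) */
        if (kill(pid, SIGSTOP) == -1 || kill(pid, SIGCONT) == -1)
                err(1, "kill");
        c = 'x';
        (void)write(fds[1], &c, 1);     /* normal wakeup */
        if (waitpid(pid, &status, 0) == -1)
                err(1, "waitpid");
        printf("child read: %s\n", WIFEXITED(status) &&
            WEXITSTATUS(status) == 0 ? "ok" : "EINTR");
        return (0);
}
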
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -387,6 +387,9 @@
int td_pmcpend;
void *td_remotereq; /* (c) dbg remote request. */
off_t td_ktr_io_lim; /* (k) limit for ktrace file size */
+ sbintime_t td_sqint_pr1; /* sleepq timeout precision */
+ int td_sqint_flags; /* sleepq timeout callout flags */
+ int td_sqint_cpu; /* sleepq timeout callout cpu */
#ifdef EPOCH_TRACE
SLIST_HEAD(, epoch_tracker) td_epochs;
#endif
diff --git a/sys/sys/signalvar.h b/sys/sys/signalvar.h
--- a/sys/sys/signalvar.h
+++ b/sys/sys/signalvar.h
@@ -397,6 +397,7 @@
int sig_ast_checksusp(struct thread *td);
int sig_ast_needsigchk(struct thread *td);
void sig_drop_caught(struct proc *p);
+bool sig_handle_sigstop(struct thread *td);
void sigexit(struct thread *td, int sig) __dead2;
int sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **);
void sigfastblock_clear(struct thread *td);
