D35888.id108493.diff

diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -585,7 +585,7 @@
jnz 4f
/* Check for and handle AST's on return to userland. */
movq PCPU(CURTHREAD),%rax
- testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+ cmpl $0,TD_AST(%rax)
jne 3f
call handle_ibrs_exit
callq *mds_handler
@@ -1141,7 +1141,7 @@
*/
cli
movq PCPU(CURTHREAD),%rax
- testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+ cmpl $0,TD_AST(%rax)
je doreti_exit
sti
movq %rsp,%rdi /* pass a pointer to the trapframe */
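
Both hunks collapse the old two-flag test (TDF_ASTPENDING | TDF_NEEDRESCHED against TD_FLAGS) into a single compare of the new TD_AST word with zero: every pending-AST reason now sets a bit in td_ast, so one cmpl covers them all. A minimal userland C model of that fast-path check (struct thread and the bit value here are stand-ins, not the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

struct thread {
	unsigned td_ast;		/* bitmask of TDAI(TDA_*) indicators */
};

/* C equivalent of "cmpl $0,TD_AST(%rax); jne 3f". */
static bool
need_ast_slowpath(const struct thread *td)
{
	return (td->td_ast != 0);
}

int
main(void)
{
	struct thread td = { .td_ast = 0 };

	printf("idle: %d\n", need_ast_slowpath(&td));
	td.td_ast |= 1U << 7;		/* e.g. an assumed TDAI(TDA_SCHED) */
	printf("work pending: %d\n", need_ast_slowpath(&td));
	return (0);
}
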
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -82,6 +82,7 @@
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_AST, offsetof(struct thread, td_ast));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
@@ -90,9 +91,6 @@
ASSYM(TD_MD_PCB, offsetof(struct thread, td_md.md_pcb));
ASSYM(TD_MD_STACK_BASE, offsetof(struct thread, td_md.md_stack_base));
-ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
-ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
-
ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN);
ASSYM(TDP_KTHREAD, TDP_KTHREAD);
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -170,6 +170,8 @@
cpu_max_ext_state_size);
}
+ td2->td_frame = (struct trapframe *)td2->td_md.md_stack_base - 1;
+
/*
* Set registers for trampoline to user mode. Leave space for the
* return address on stack. These are the kernel mode register values.
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -365,13 +365,7 @@
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{
-
- if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
- return (1);
- else if (curthread->td_owepreempt)
- return (1);
- else
- return (0);
+ return (curthread->td_ast != 0 || curthread->td_owepreempt != 0);
}
#endif
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1331,7 +1331,7 @@
RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
"vmrndv", hz);
- if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+ if ((td->td_ast & TDAI(TDA_SUSPEND)) != 0) {
mtx_unlock(&vm->rendezvous_mtx);
error = thread_check_susp(td, true);
if (error != 0)
@@ -1421,7 +1421,7 @@
msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
- if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+ if ((td->td_ast & TDAI(TDA_SUSPEND)) != 0) {
vcpu_unlock(vcpu);
error = thread_check_susp(td, false);
if (error != 0)
@@ -1593,7 +1593,7 @@
vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
- if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+ if ((td->td_ast & TDAI(TDA_SUSPEND)) != 0) {
vcpu_unlock(vcpu);
error = thread_check_susp(td, false);
vcpu_lock(vcpu);
diff --git a/sys/compat/linux/linux_event.c b/sys/compat/linux/linux_event.c
--- a/sys/compat/linux/linux_event.c
+++ b/sys/compat/linux/linux_event.c
@@ -458,9 +458,7 @@
* usermode and TDP_OLDMASK is cleared, restoring old
* sigmask.
*/
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_SIGSUSPEND);
}
coargs.leventlist = events;
diff --git a/sys/compat/linux/linux_fork.c b/sys/compat/linux/linux_fork.c
--- a/sys/compat/linux/linux_fork.c
+++ b/sys/compat/linux/linux_fork.c
@@ -323,7 +323,7 @@
sched_fork_thread(td, newtd);
thread_unlock(td);
if (P_SHOULDSTOP(p))
- newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
+ ast_sched(newtd, TDA_SUSPEND);
if (p->p_ptevents & PTRACE_LWP)
newtd->td_dbgflags |= TDB_BORN;
diff --git a/sys/compat/linuxkpi/common/include/linux/sched.h b/sys/compat/linuxkpi/common/include/linux/sched.h
--- a/sys/compat/linuxkpi/common/include/linux/sched.h
+++ b/sys/compat/linuxkpi/common/include/linux/sched.h
@@ -130,7 +130,7 @@
#define yield() kern_yield(PRI_UNCHANGED)
#define sched_yield() sched_relinquish(curthread)
-#define need_resched() (curthread->td_flags & TDF_NEEDRESCHED)
+#define need_resched() ((curthread->td_ast & TDAI(TDA_SCHED)) != 0)
static inline int
cond_resched_lock(spinlock_t *lock)
diff --git a/sys/dev/hwpmc/hwpmc_soft.c b/sys/dev/hwpmc/hwpmc_soft.c
--- a/sys/dev/hwpmc/hwpmc_soft.c
+++ b/sys/dev/hwpmc/hwpmc_soft.c
@@ -430,10 +430,12 @@
}
if (user_mode) {
- /* If in user mode setup AST to process
+ /*
+ * If in user mode setup AST to process
* callchain out of interrupt context.
+ * XXXKIB is curthread locked?
*/
- curthread->td_flags |= TDF_ASTPENDING;
+ ast_sched_locked(curthread, TDA_HWPMC);
}
} else
pc->soft_values[ri]++;
@@ -446,6 +448,16 @@
return (processed);
}
+static void
+ast_hwpmc(struct thread *td, int tda __unused)
+{
+ /* Handle Software PMC callchain capture. */
+ if (PMC_IS_PENDING_CALLCHAIN(td))
+ PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT,
+ (void *)td->td_frame);
+}
+
void
pmc_soft_initialize(struct pmc_mdep *md)
{
@@ -477,6 +489,8 @@
pcd->pcd_stop_pmc = soft_stop_pmc;
md->pmd_npmc += SOFT_NPMCS;
+
+ ast_register(TDA_HWPMC, ASTR_UNCOND, 0, ast_hwpmc);
}
void
@@ -493,6 +507,7 @@
KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
#endif
+ ast_deregister(TDA_HWPMC);
free(soft_pcpu, M_PMC);
soft_pcpu = NULL;
}
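
pmc_soft_initialize() and pmc_soft_uninitialize() above pair ast_register(TDA_HWPMC, ...) with ast_deregister(TDA_HWPMC), so a subsystem can install its AST handler at setup time and remove it on teardown. A compilable sketch of that registry discipline; the constants, slot count, and *_model names are assumed stand-ins for the kernel's:

#include <assert.h>
#include <stddef.h>

struct thread;				/* opaque in this model */

struct ast_entry {
	int ae_flags;
	void (*ae_f)(struct thread *, int);
};

enum { TDA_HWPMC = 2, TDA_MAX = 20 };	/* assumed ordinals */
#define ASTR_UNCOND	0x0008

static struct ast_entry ast_entries[TDA_MAX];

static void
ast_register_model(int ast, int flags, void (*f)(struct thread *, int))
{
	assert(ast < TDA_MAX && ast_entries[ast].ae_f == NULL);
	ast_entries[ast].ae_flags = flags;
	ast_entries[ast].ae_f = f;	/* kernel fences before this store */
}

static void
ast_deregister_model(int ast)
{
	assert(ast_entries[ast].ae_f != NULL);
	ast_entries[ast].ae_f = NULL;
	ast_entries[ast].ae_flags = 0;
}

static void
ast_hwpmc_model(struct thread *td, int tda)
{
	(void)td;
	(void)tda;			/* would capture the callchain here */
}

int
main(void)
{
	ast_register_model(TDA_HWPMC, ASTR_UNCOND, ast_hwpmc_model);	/* load */
	ast_deregister_model(TDA_HWPMC);				/* unload */
	return (0);
}
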
diff --git a/sys/fs/nfsserver/nfs_nfsdkrpc.c b/sys/fs/nfsserver/nfs_nfsdkrpc.c
--- a/sys/fs/nfsserver/nfs_nfsdkrpc.c
+++ b/sys/fs/nfsserver/nfs_nfsdkrpc.c
@@ -327,7 +327,7 @@
svc_freereq(rqst);
out:
- td_softdep_cleanup(curthread);
+ ast_kclear(curthread);
NFSEXITCODE(0);
}
diff --git a/sys/geom/geom.h b/sys/geom/geom.h
--- a/sys/geom/geom.h
+++ b/sys/geom/geom.h
@@ -59,6 +59,7 @@
struct gctl_req;
struct g_configargs;
struct disk_zone_args;
+struct thread;
typedef int g_config_t (struct g_configargs *ca);
typedef void g_ctl_req_t (struct gctl_req *, struct g_class *cp, char const *verb);
@@ -270,6 +271,7 @@
void g_orphan_provider(struct g_provider *pp, int error);
struct g_event *g_alloc_event(int flag);
void g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...);
+void g_waitidle(struct thread *td);
/* geom_subr.c */
int g_access(struct g_consumer *cp, int nread, int nwrite, int nexcl);
diff --git a/sys/geom/geom_event.c b/sys/geom/geom_event.c
--- a/sys/geom/geom_event.c
+++ b/sys/geom/geom_event.c
@@ -81,7 +81,7 @@
#define EV_INPROGRESS 0x10000
void
-g_waitidle(void)
+g_waitidle(struct thread *td)
{
g_topology_assert_not();
@@ -96,6 +96,24 @@
curthread->td_pflags &= ~TDP_GEOM;
}
+static void
+ast_geom(struct thread *td, int tda __unused)
+{
+ /*
+ * If this thread tickled GEOM, we need to wait for the giggling to
+ * stop before we return to userland
+ */
+ g_waitidle(td);
+}
+
+static void
+geom_event_init(void *arg __unused)
+{
+ ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
+ TDP_GEOM, ast_geom);
+}
+SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL);
+
struct g_attrchanged_args {
struct g_provider *pp;
const char *attr;
@@ -353,9 +371,7 @@
mtx_unlock(&g_eventlock);
wakeup(&g_wait_event);
curthread->td_pflags |= TDP_GEOM;
- thread_lock(curthread);
- curthread->td_flags |= TDF_ASTPENDING;
- thread_unlock(curthread);
+ ast_sched(curthread, TDA_GEOM);
}
void
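
ast_geom is registered with ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR and TDP_GEOM, so it runs only for threads whose private flags record GEOM activity, and ASTR_KCLEAR additionally lets ast_kclear() invoke it for threads that never return to usermode (replacing the open-coded TDP_GEOM check removed from kern_exit.c below). A short model of the ASTR_TDP gate; the flag values are assumed:

#include <stdbool.h>

#define ASTR_TDP	0x0002
#define TDP_GEOM	0x00001000	/* assumed td_pflags bit */

struct thread {
	int td_pflags;
};

/* Mirrors ast_handler_calc_tdp_run() in the subr_trap.c hunk below. */
static bool
tdp_gate(const struct thread *td, int ae_flags, int ae_tdp)
{
	return ((ae_flags & ASTR_TDP) == 0 ||
	    (td->td_pflags & ae_tdp) != 0);
}

int
main(void)
{
	struct thread td = { .td_pflags = 0 };

	if (tdp_gate(&td, ASTR_TDP, TDP_GEOM))	/* false: no GEOM activity */
		return (1);
	td.td_pflags |= TDP_GEOM;		/* g_post_event() sets this */
	return (tdp_gate(&td, ASTR_TDP, TDP_GEOM) ? 0 : 1);
}
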
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -470,7 +470,7 @@
*/
cli
movl PCPU(CURTHREAD),%eax
- testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
+ cmpl $0,TD_AST(%eax)
je doreti_exit
sti
pushl %esp /* pass a pointer to the trapframe */
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -85,6 +85,7 @@
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_AST, offsetof(struct thread, td_ast));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags));
@@ -96,9 +97,6 @@
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));
-ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
-ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
-
ASSYM(TD0_KSTACK_PAGES, TD0_KSTACK_PAGES);
ASSYM(PAGE_SIZE, PAGE_SIZE);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -385,6 +385,38 @@
static int devpoll_run = 0;
#endif
+static void
+ast_oweupc(struct thread *td, int tda __unused)
+{
+ if ((td->td_proc->p_flag & P_PROFIL) == 0)
+ return;
+ addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
+ td->td_profil_ticks = 0;
+ td->td_pflags &= ~TDP_OWEUPC;
+}
+
+static void
+ast_alrm(struct thread *td, int tda __unused)
+{
+ struct proc *p;
+
+ p = td->td_proc;
+ PROC_LOCK(p);
+ kern_psignal(p, SIGVTALRM);
+ PROC_UNLOCK(p);
+}
+
+static void
+ast_prof(struct thread *td, int tda __unused)
+{
+ struct proc *p;
+
+ p = td->td_proc;
+ PROC_LOCK(p);
+ kern_psignal(p, SIGPROF);
+ PROC_UNLOCK(p);
+}
+
/*
* Initialize clock frequencies and start both clocks running.
*/
@@ -408,6 +440,10 @@
profhz = i;
psratio = profhz / i;
+ ast_register(TDA_OWEUPC, ASTR_ASTF_REQUIRED, 0, ast_oweupc);
+ ast_register(TDA_ALRM, ASTR_ASTF_REQUIRED, 0, ast_alrm);
+ ast_register(TDA_PROF, ASTR_ASTF_REQUIRED, 0, ast_prof);
+
#ifdef SW_WATCHDOG
/* Enable hardclock watchdog now, even if a hardware watchdog exists. */
watchdog_attach();
@@ -423,30 +459,27 @@
hardclock_itimer(struct thread *td, struct pstats *pstats, int cnt, int usermode)
{
struct proc *p;
- int flags;
+ int ast;
- flags = 0;
+ ast = 0;
p = td->td_proc;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
tick * cnt) == 0)
- flags |= TDF_ALRMPEND | TDF_ASTPENDING;
+ ast |= TDAI(TDA_ALRM);
PROC_ITIMUNLOCK(p);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
tick * cnt) == 0)
- flags |= TDF_PROFPEND | TDF_ASTPENDING;
+ ast |= TDAI(TDA_PROF);
PROC_ITIMUNLOCK(p);
}
- if (flags != 0) {
- thread_lock(td);
- td->td_flags |= flags;
- thread_unlock(td);
- }
+ if (ast != 0)
+ ast_sched_mask(td, ast);
}
void
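
hardclock_itimer() now accumulates TDAI() bits in a local variable and publishes them with one ast_sched_mask() call, so the thread lock is taken once rather than once per flag. A small model of that batching; the ordinals are assumed stand-ins:

#include <stdio.h>

enum { TDA_ALRM = 4, TDA_PROF = 5 };	/* assumed ordinals */
#define TDAI(tda)	(1U << (tda))

struct thread {
	unsigned td_ast;
};

static void
ast_sched_mask_model(struct thread *td, unsigned ast)
{
	/* The kernel wraps this update in thread_lock()/thread_unlock(). */
	td->td_ast |= ast;
}

int
main(void)
{
	struct thread td = { .td_ast = 0 };
	unsigned ast = 0;

	ast |= TDAI(TDA_ALRM);		/* ITIMER_VIRTUAL expired */
	ast |= TDAI(TDA_PROF);		/* ITIMER_PROF expired */
	if (ast != 0)
		ast_sched_mask_model(&td, ast);
	printf("td_ast = %#x\n", td.td_ast);
	return (0);
}
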
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -1782,8 +1782,8 @@
KQ_UNLOCK(kq);
}
-void
-kqueue_drain_schedtask(void)
+static void
+ast_kqueue(struct thread *td, int tda __unused)
{
taskqueue_quiesce(taskqueue_kqueue_ctx);
}
@@ -1791,8 +1791,6 @@
static void
kqueue_schedtask(struct kqueue *kq)
{
- struct thread *td;
-
KQ_OWNED(kq);
KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
("scheduling kqueue task while draining"));
@@ -1800,10 +1798,7 @@
if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
kq->kq_state |= KQ_TASKSCHED;
- td = curthread;
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
- thread_unlock(td);
+ ast_sched(curthread, TDA_KQUEUE);
}
}
@@ -2813,6 +2808,7 @@
knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
+ ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -254,7 +254,7 @@
/*
* Deref SU mp, since the thread does not return to userspace.
*/
- td_softdep_cleanup(td);
+ ast_kclear(td);
/*
* MUST abort all other threads before proceeding past here.
@@ -405,13 +405,6 @@
pdescfree(td);
fdescfree(td);
- /*
- * If this thread tickled GEOM, we need to wait for the giggling to
- * stop before we return to userland
- */
- if (td->td_pflags & TDP_GEOM)
- g_waitidle();
-
/*
* Remove ourself from our leader's peer list and wake our leader.
*/
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -499,7 +499,7 @@
* to avoid calling thread_lock() again.
*/
if ((fr->fr_flags & RFPPWAIT) != 0)
- td->td_flags |= TDF_ASTPENDING;
+ ast_sched_locked(td, TDA_VFORK);
thread_unlock(td);
/*
@@ -814,8 +814,8 @@
}
}
-void
-fork_rfppwait(struct thread *td)
+static void
+ast_vfork(struct thread *td, int tda __unused)
{
struct proc *p, *p2;
@@ -1181,3 +1181,11 @@
ktrsysret(SYS_fork, 0, 0);
#endif
}
+
+static void
+fork_init(void *arg __unused)
+{
+ ast_register(TDA_VFORK, ASTR_ASTF_REQUIRED | ASTR_TDP, TDP_RFPPWAIT,
+ ast_vfork);
+}
+SYSINIT(fork, SI_SUB_INTRINSIC, SI_ORDER_ANY, fork_init, NULL);
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -209,6 +209,12 @@
KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}
+static void
+ast_ktrace(struct thread *td, int tda __unused)
+{
+ KTRUSERRET(td);
+}
+
static void
ktrace_init(void *dummy)
{
@@ -223,6 +229,8 @@
M_ZERO);
STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}
+ ast_register(TDA_KTRACE, ASTR_UNCOND, 0, ast_ktrace);
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
@@ -370,9 +378,7 @@
mtx_lock(&ktrace_mtx);
STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
mtx_unlock(&ktrace_mtx);
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_KTRACE);
}
/*
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -246,8 +246,7 @@
#endif
/* Free all OSD associated to this thread. */
osd_thread_exit(td);
- td_softdep_cleanup(td);
- MPASS(td->td_su == NULL);
+ ast_kclear(td);
/* Make sure all thread destructors are executed */
EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1098,9 +1098,14 @@
RACCT_UNLOCK();
}
-void
-racct_proc_throttled(struct proc *p)
+static void
+ast_racct(struct thread *td, int tda __unused)
{
+ struct proc *p;
+
+ p = td->td_proc;
+ if (!racct_enable || p->p_throttled == 0)
+ return;
ASSERT_RACCT_ENABLED();
@@ -1144,24 +1149,24 @@
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
+ ast_sched_locked(td, TDA_RACCT);
switch (TD_GET_STATE(td)) {
case TDS_RUNQ:
/*
* If the thread is on the scheduler run-queue, we can
* not just remove it from there. So we set the flag
- * TDF_NEEDRESCHED for the thread, so that once it is
+ * TDA_SCHED for the thread, so that once it is
* running, it is taken off the cpu as soon as possible.
*/
- td->td_flags |= TDF_NEEDRESCHED;
+ ast_sched_locked(td, TDA_SCHED);
break;
case TDS_RUNNING:
/*
* If the thread is running, we request a context
- * switch for it by setting the TDF_NEEDRESCHED flag.
+ * switch for it by setting the TDA_SCHED flag.
*/
- td->td_flags |= TDF_NEEDRESCHED;
+ ast_sched_locked(td, TDA_SCHED);
#ifdef SMP
cpuid = td->td_oncpu;
if ((cpuid != NOCPU) && (td != curthread))
@@ -1355,6 +1360,8 @@
racct_zone = uma_zcreate("racct", sizeof(struct racct),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ ast_register(TDA_RACCT, ASTR_ASTF_REQUIRED, 0, ast_racct);
+
/*
* XXX: Move this somewhere.
*/
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -116,6 +116,7 @@
static int filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
static void sigqueue_start(void);
+static void sigfastblock_setpend(struct thread *td, bool resched);
static uma_zone_t ksiginfo_zone = NULL;
struct filterops sig_filtops = {
@@ -272,7 +273,79 @@
for (int32_t __i = -1, __bits = 0; \
_SIG_FOREACH_ADVANCE(i, set); ) \
-sigset_t fastblock_mask;
+static sigset_t fastblock_mask;
+
+static void
+ast_sig(struct thread *td, int tda)
+{
+ struct proc *p;
+ int sig;
+ bool resched_sigs;
+
+ p = td->td_proc;
+
+#ifdef DIAGNOSTIC
+ if (p->p_numthreads == 1 && (tda & TDAI2(TDA_SIG, TDA_AST)) == 0) {
+ PROC_LOCK(p);
+ thread_lock(td);
+ /*
+ * Note that TDA_SIG should be re-read from
+ * td_ast, since signal might have been delivered
+ * after we cleared td_ast above. This is one of
+ * the reasons for the looping check for the AST condition.
+ * See comment in userret() about P_PPWAIT.
+ */
+ if ((p->p_flag & P_PPWAIT) == 0 &&
+ (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
+ if (SIGPENDING(td) && ((tda | td->td_ast) &
+ TDAI2(TDA_SIG, TDA_AST)) == 0) {
+ thread_unlock(td); /* fix dumps */
+ panic(
+ "failed2 to set signal flags for ast p %p "
+ "td %p tda %#x td_ast %#x fl %#x",
+ p, td, tda, td->td_ast, td->td_flags);
+ }
+ }
+ thread_unlock(td);
+ PROC_UNLOCK(p);
+ }
+#endif
+
+ /*
+ * Check for signals. Unlocked reads of p_pendingcnt or
+ * p_siglist might cause process-directed signal to be handled
+ * later.
+ */
+ if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
+ !SIGISEMPTY(p->p_siglist)) {
+ sigfastblock_fetch(td);
+ PROC_LOCK(p);
+ mtx_lock(&p->p_sigacts->ps_mtx);
+ while ((sig = cursig(td)) != 0) {
+ KASSERT(sig >= 0, ("sig %d", sig));
+ postsig(sig);
+ }
+ mtx_unlock(&p->p_sigacts->ps_mtx);
+ PROC_UNLOCK(p);
+ resched_sigs = true;
+ } else {
+ resched_sigs = false;
+ }
+
+ /*
+ * Handle deferred update of the fast sigblock value, after
+ * the postsig() loop was performed.
+ */
+ sigfastblock_setpend(td, resched_sigs);
+}
+
+static void
+ast_sigsuspend(struct thread *td, int tda __unused)
+{
+ MPASS((td->td_pflags & TDP_OLDMASK) != 0);
+ td->td_pflags &= ~TDP_OLDMASK;
+ kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
+}
static void
sigqueue_start(void)
@@ -285,6 +358,9 @@
p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
SIGFILLSET(fastblock_mask);
SIG_CANTMASK(fastblock_mask);
+ ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
+ ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
+ TDP_OLDMASK, ast_sigsuspend);
}
ksiginfo_t *
@@ -644,11 +720,8 @@
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
- if (SIGPENDING(td)) {
- thread_lock(td);
- td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
- thread_unlock(td);
- }
+ if (SIGPENDING(td))
+ ast_sched(td, TDA_SIG);
}
/*
@@ -2587,7 +2660,7 @@
wakeup_swapper = 0;
FOREACH_THREAD_IN_PROC(p, td2) {
thread_lock(td2);
- td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
+ ast_sched_locked(td2, TDA_SUSPEND);
if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
(td2->td_flags & TDF_SINTR)) {
if (td2->td_flags & TDF_SBDRY) {
@@ -2608,7 +2681,7 @@
thread_suspend_one(td2);
} else if (!TD_IS_SUSPENDED(td2)) {
if (sending || td != td2)
- td2->td_flags |= TDF_ASTPENDING;
+ ast_sched_locked(td2, TDA_AST);
#ifdef SMP
if (TD_IS_RUNNING(td2) && td2 != td)
forward_signal(td2);
@@ -3268,7 +3341,7 @@
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
+ if ((td->td_ast & TDAI(TDA_SUSPEND)) == 0)
return (0);
ret = thread_suspend_check(1);
@@ -3286,7 +3359,7 @@
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
+ if ((td->td_ast & TDAI(TDA_SIG)) == 0)
return (0);
ps = p->p_sigacts;
@@ -3332,7 +3405,7 @@
int ret;
td = curthread;
- if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
+ if ((td->td_ast & TDAI2(TDA_SIG, TDA_SUSPEND)) == 0)
return (0);
p = td->td_proc;
@@ -3354,7 +3427,7 @@
bool res;
td = curthread;
- if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
+ if ((td->td_ast & TDAI(TDA_SIG)) == 0)
return (false);
p = td->td_proc;
@@ -4224,9 +4297,7 @@
reschedule_signals(p, td->td_sigmask, 0);
PROC_UNLOCK(p);
}
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING | TDF_NEEDSIGCHK;
- thread_unlock(td);
+ ast_sched(td, TDA_SIG);
}
int
@@ -4379,7 +4450,7 @@
}
}
-void
+static void
sigfastblock_setpend(struct thread *td, bool resched)
{
struct proc *p;
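
The sig_intr()-style checks above replace td_flags bit tests with unlocked reads of td_ast: a clear bit is a cheap "nothing pending" early-out, while a set bit routes the caller to the locked slow path, so a racy false negative is harmless and rechecked at the next AST. A model of that pattern with assumed ordinals:

#include <stdbool.h>

enum { TDA_SIG = 14, TDA_SUSPEND = 16 };	/* assumed ordinals */
#define TDAI(tda)	(1U << (tda))
#define TDAI2(t1, t2)	(TDAI(t1) | TDAI(t2))

struct thread {
	unsigned td_ast;
};

static int
sig_intr_model(const struct thread *td)
{
	/* Unlocked read: false negatives are caught at the next AST. */
	if ((td->td_ast & TDAI2(TDA_SIG, TDA_SUSPEND)) == 0)
		return (0);
	/* ... the kernel takes PROC_LOCK and runs the real checks ... */
	return (1);
}

int
main(void)
{
	struct thread td = { .td_ast = TDAI(TDA_SIG) };

	return (sig_intr_model(&td) == 1 ? 0 : 1);
}
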
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -632,11 +632,27 @@
loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}
-/* ARGSUSED */
static void
-synch_setup(void *dummy)
+ast_scheduler(struct thread *td, int tda __unused)
+{
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(1, 1, __func__);
+#endif
+ thread_lock(td);
+ sched_prio(td, td->td_user_pri);
+ mi_switch(SW_INVOL | SWT_NEEDRESCHED);
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(0, 1, __func__);
+#endif
+}
+
+static void
+synch_setup(void *dummy __unused)
{
callout_init(&loadav_callout, 1);
+ ast_register(TDA_SCHED, ASTR_ASTF_REQUIRED, 0, ast_scheduler);
/* Kick off timeout driven events by calling first time. */
loadav(NULL);
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -257,7 +257,7 @@
sched_fork_thread(td, newtd);
thread_unlock(td);
if (P_SHOULDSTOP(p))
- newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
+ ast_sched(newtd, TDA_SUSPEND);
if (p->p_ptevents & PTRACE_LWP)
newtd->td_dbgflags |= TDB_BORN;
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -87,11 +87,11 @@
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
"struct thread KBI td_flags");
-_Static_assert(offsetof(struct thread, td_pflags) == 0x110,
+_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
"struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x4a8,
+_Static_assert(offsetof(struct thread, td_frame) == 0x4b0,
"struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
"struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
"struct proc KBI p_flag");
@@ -406,8 +406,7 @@
#endif
/* Free all OSD associated to this thread. */
osd_thread_exit(td);
- td_softdep_cleanup(td);
- MPASS(td->td_su == NULL);
+ ast_kclear(td);
seltdfini(td);
}
@@ -479,6 +478,21 @@
thread_link(td, p);
}
+static void
+ast_suspend(struct thread *td, int tda __unused)
+{
+ struct proc *p;
+
+ p = td->td_proc;
+ /*
+ * We need to check to see if we have to exit or wait due to a
+ * single threading requirement or some other STOP condition.
+ */
+ PROC_LOCK(p);
+ thread_suspend_check(0);
+ PROC_UNLOCK(p);
+}
+
extern int max_threads_per_proc;
/*
@@ -544,6 +558,7 @@
callout_init(&thread_reap_callout, 1);
callout_reset(&thread_reap_callout, 5 * hz,
thread_reap_callout_cb, NULL);
+ ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
}
/*
@@ -1246,7 +1261,7 @@
if (td2 == td)
continue;
thread_lock(td2);
- td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
+ ast_sched_locked(td2, TDA_SUSPEND);
if (TD_IS_INHIBITED(td2)) {
wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
@@ -1492,10 +1507,10 @@
int error;
/*
- * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
+ * The check for TDA_SUSPEND is racy, but it is enough to
* eventually break the lockstep loop.
*/
- if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
+ if ((td->td_ast & TDAI(TDA_SUSPEND)) == 0)
return (0);
error = 0;
p = td->td_proc;
@@ -1526,7 +1541,7 @@
}
PROC_UNLOCK(p);
thread_lock(td);
- td->td_flags &= ~TDF_NEEDSUSPCHK;
+ ast_unsched_locked(td, TDA_SUSPEND);
TD_SET_SUSPENDED(td);
sched_sleep(td, 0);
PROC_SUNLOCK(p);
@@ -1547,7 +1562,7 @@
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
- td->td_flags &= ~TDF_NEEDSUSPCHK;
+ ast_unsched_locked(td, TDA_SUSPEND);
TD_SET_SUSPENDED(td);
sched_sleep(td, 0);
}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -874,7 +874,8 @@
if (td->td_lock == TDQ_LOCKPTR(tdq) &&
(td->td_flags & TDF_IDLETD) == 0 &&
THREAD_CAN_MIGRATE(td)) {
- td->td_flags |= TDF_NEEDRESCHED | TDF_PICKCPU;
+ td->td_flags |= TDF_PICKCPU;
+ ast_sched_locked(td, TDA_SCHED);
if (high != curcpu)
ipi_cpu(high, IPI_AST);
}
@@ -1998,7 +1999,7 @@
if (td->td_priority > td->td_user_pri)
sched_prio(td, td->td_user_pri);
else if (td->td_priority != td->td_user_pri)
- td->td_flags |= TDF_NEEDRESCHED;
+ ast_sched_locked(td, TDA_SCHED);
}
/*
@@ -2211,7 +2212,8 @@
td->td_lastcpu = td->td_oncpu;
preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
(flags & SW_PREEMPT) != 0;
- td->td_flags &= ~(TDF_NEEDRESCHED | TDF_PICKCPU | TDF_SLICEEND);
+ td->td_flags &= ~(TDF_PICKCPU | TDF_SLICEEND);
+ ast_unsched_locked(td, TDA_SCHED);
td->td_owepreempt = 0;
atomic_store_char(&tdq->tdq_owepreempt, 0);
if (!TD_IS_IDLETHREAD(td))
@@ -2644,8 +2646,10 @@
SCHED_STAT_INC(ithread_demotions);
sched_prio(td, td->td_base_pri + RQ_PPQ);
}
- } else
- td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+ } else {
+ ast_sched_locked(td, TDA_SCHED);
+ td->td_flags |= TDF_SLICEEND;
+ }
}
}
@@ -2720,7 +2724,7 @@
cpri = ctd->td_priority;
if (pri < cpri)
- ctd->td_flags |= TDF_NEEDRESCHED;
+ ast_sched_locked(ctd, TDA_SCHED);
if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
return;
if (!sched_shouldpreempt(pri, cpri, 0))
@@ -2892,7 +2896,7 @@
* target thread is not running locally send an ipi to force
* the issue.
*/
- td->td_flags |= TDF_NEEDRESCHED;
+ ast_sched_locked(td, TDA_SCHED);
if (td != curthread)
ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
#endif
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -130,9 +130,7 @@
td->td_profil_addr = pc;
td->td_profil_ticks = ticks;
td->td_pflags |= TDP_OWEUPC;
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_OWEUPC);
}
/*
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -452,7 +452,7 @@
* thread. If not, we can switch immediately.
*/
thread_lock(td);
- if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
+ if ((td->td_ast & TDAI2(TDA_SIG, TDA_SUSPEND)) == 0)
return (0);
thread_unlock(td);
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -186,7 +186,7 @@
int id;
/*
- * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
+ * signotify() has already set TDA_AST and TDA_SIG on td_ast for
* this thread, so all we need to do is poke it if it is currently
* executing so that it executes ast().
*/
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -4,7 +4,7 @@
* Copyright (C) 1994, David Greenman
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
- * Copyright (c) 2007 The FreeBSD Foundation
+ * Copyright (c) 2007, 2022 The FreeBSD Foundation
*
* This code is derived from software contributed to Berkeley by
* the University of Utah, and William Jolitz.
@@ -47,35 +47,22 @@
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
-#include "opt_ktrace.h"
-#include "opt_sched.h"
#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/capsicum.h>
-#include <sys/event.h>
#include <sys/kernel.h>
+#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
-#include <sys/pmckern.h>
#include <sys/proc.h>
#include <sys/ktr.h>
-#include <sys/ptrace.h>
-#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
-#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
-#ifdef KTRACE
-#include <sys/uio.h>
-#include <sys/ktrace.h>
-#endif
-#include <security/audit/audit.h>
#include <machine/cpu.h>
@@ -87,10 +74,6 @@
#include <sys/pmckern.h>
#endif
-#include <security/mac/mac_framework.h>
-
-void (*softdep_ast_cleanup)(struct thread *);
-
/*
* Define the code needed before returning to user mode, for trap and
* syscall.
@@ -119,13 +102,13 @@
thread_lock(td);
if ((p->p_flag & P_PPWAIT) == 0 &&
(td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
- if (SIGPENDING(td) && (td->td_flags &
- (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
- (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
+ if (SIGPENDING(td) && (td->td_ast &
+ TDAI2(TDA_AST, TDA_SIG)) == 0) {
thread_unlock(td);
panic(
- "failed to set signal flags for ast p %p td %p fl %x",
- p, td, td->td_flags);
+ "failed to set signal flags for ast p %p "
+ "td %p td_ast %#x fl %#x",
+ p, td, td->td_ast, td->td_flags);
}
}
thread_unlock(td);
@@ -205,181 +188,171 @@
#endif
}
-/*
- * Process an asynchronous software trap.
- * This is relatively easy.
- * This function will return with preemption disabled.
- */
+static void
+ast_prep(struct thread *td, int tda __unused)
+{
+ VM_CNT_INC(v_trap);
+ td->td_pticks = 0;
+ if (td->td_cowgen != atomic_load_int(&td->td_proc->p_cowgen))
+ thread_cow_update(td);
+}
+
+struct ast_entry {
+ int ae_flags;
+ int ae_tdp;
+ void (*ae_f)(struct thread *td, int ast);
+};
+
+_Static_assert(TDAI(TDA_MAX) <= UINT_MAX, "Too many ASTs");
+
+static struct ast_entry ast_entries[TDA_MAX] __read_mostly = {
+ [TDA_AST] = { .ae_f = ast_prep, .ae_flags = ASTR_UNCOND},
+};
+
void
-ast(struct trapframe *framep)
+ast_register(int ast, int flags, int tdp,
+ void (*f)(struct thread *, int asts))
{
- struct thread *td;
- struct proc *p;
- int flags, sig;
- bool resched_sigs;
+ struct ast_entry *ae;
+
+ MPASS(ast < TDA_MAX);
+ MPASS((flags & ASTR_TDP) == 0 || ((flags & ASTR_ASTF_REQUIRED) != 0
+ && __bitcount(tdp) == 1));
+ ae = &ast_entries[ast];
+ MPASS(ae->ae_f == NULL);
+ ae->ae_flags = flags;
+ ae->ae_tdp = tdp;
+ atomic_interrupt_fence();
+ ae->ae_f = f;
+}
- kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);
+void
+ast_deregister(int ast)
+{
+ struct ast_entry *ae;
+
+ MPASS(ast < TDA_MAX);
+ ae = &ast_entries[ast];
+ MPASS(ae->ae_f != NULL);
+ ae->ae_f = NULL;
+ atomic_interrupt_fence();
+ ae->ae_flags = 0;
+ ae->ae_tdp = 0;
+}
- td = curthread;
- p = td->td_proc;
+void
+ast_sched_locked(struct thread *td, int tda)
+{
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ MPASS(tda < TDA_MAX);
- CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
- p->p_comm);
- KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
- WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
- mtx_assert(&Giant, MA_NOTOWNED);
- THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
- td->td_frame = framep;
- td->td_pticks = 0;
+ td->td_ast |= TDAI(tda);
+}
- /*
- * This updates the td_flag's for the checks below in one
- * "atomic" operation with turning off the astpending flag.
- * If another AST is triggered while we are handling the
- * AST's saved in flags, the astpending flag will be set and
- * ast() will be called again.
- */
+void
+ast_unsched_locked(struct thread *td, int tda)
+{
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ MPASS(tda < TDA_MAX);
+
+ td->td_ast &= ~TDAI(tda);
+}
+
+void
+ast_sched(struct thread *td, int tda)
+{
thread_lock(td);
- flags = td->td_flags;
- td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
- TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND |
- TDF_KQTICKLED);
+ ast_sched_locked(td, tda);
thread_unlock(td);
- VM_CNT_INC(v_trap);
-
- if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
- thread_cow_update(td);
- if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
- addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
- td->td_profil_ticks = 0;
- td->td_pflags &= ~TDP_OWEUPC;
- }
-#ifdef HWPMC_HOOKS
- /* Handle Software PMC callchain capture. */
- if (PMC_IS_PENDING_CALLCHAIN(td))
- PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
-#endif
- if ((td->td_pflags & TDP_RFPPWAIT) != 0)
- fork_rfppwait(td);
- if (flags & TDF_ALRMPEND) {
- PROC_LOCK(p);
- kern_psignal(p, SIGVTALRM);
- PROC_UNLOCK(p);
- }
- if (flags & TDF_PROFPEND) {
- PROC_LOCK(p);
- kern_psignal(p, SIGPROF);
- PROC_UNLOCK(p);
- }
-#ifdef MAC
- if (flags & TDF_MACPEND)
- mac_thread_userret(td);
-#endif
- if (flags & TDF_NEEDRESCHED) {
-#ifdef KTRACE
- if (KTRPOINT(td, KTR_CSW))
- ktrcsw(1, 1, __func__);
-#endif
- thread_lock(td);
- sched_prio(td, td->td_user_pri);
- mi_switch(SW_INVOL | SWT_NEEDRESCHED);
-#ifdef KTRACE
- if (KTRPOINT(td, KTR_CSW))
- ktrcsw(0, 1, __func__);
-#endif
- }
+}
- td_softdep_cleanup(td);
- MPASS(td->td_su == NULL);
+void
+ast_sched_mask(struct thread *td, int ast)
+{
+ thread_lock(td);
+ td->td_ast |= ast;
+ thread_unlock(td);
+}
- /*
- * If this thread tickled GEOM, we need to wait for the giggling to
- * stop before we return to userland
- */
- if (__predict_false(td->td_pflags & TDP_GEOM))
- g_waitidle();
+static bool
+ast_handler_calc_tdp_run(struct thread *td, const struct ast_entry *ae)
+{
+ return ((ae->ae_flags & ASTR_TDP) == 0 ||
+ (td->td_pflags & ae->ae_tdp) != 0);
+}
-#ifdef DIAGNOSTIC
- if (p->p_numthreads == 1 && (flags & TDF_NEEDSIGCHK) == 0) {
- PROC_LOCK(p);
- thread_lock(td);
- /*
- * Note that TDF_NEEDSIGCHK should be re-read from
- * td_flags, since signal might have been delivered
- * after we cleared td_flags above. This is one of
- * the reason for looping check for AST condition.
- * See comment in userret() about P_PPWAIT.
- */
- if ((p->p_flag & P_PPWAIT) == 0 &&
- (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
- if (SIGPENDING(td) && (td->td_flags &
- (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
- (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
- thread_unlock(td); /* fix dumps */
- panic(
- "failed2 to set signal flags for ast p %p td %p fl %x %x",
- p, td, flags, td->td_flags);
- }
- }
- thread_unlock(td);
- PROC_UNLOCK(p);
- }
-#endif
+/*
+ * Process an asynchronous software trap.
+ */
+static void
+ast_handler(struct thread *td, struct trapframe *framep)
+{
+ struct ast_entry *ae;
+ void (*f)(struct thread *td, int asts);
+ int a, td_ast;
- /*
- * Check for signals. Unlocked reads of p_pendingcnt or
- * p_siglist might cause process-directed signal to be handled
- * later.
- */
- if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
- !SIGISEMPTY(p->p_siglist)) {
- sigfastblock_fetch(td);
- PROC_LOCK(p);
- mtx_lock(&p->p_sigacts->ps_mtx);
- while ((sig = cursig(td)) != 0) {
- KASSERT(sig >= 0, ("sig %d", sig));
- postsig(sig);
- }
- mtx_unlock(&p->p_sigacts->ps_mtx);
- PROC_UNLOCK(p);
- resched_sigs = true;
- } else {
- resched_sigs = false;
+ if (framep != NULL) {
+ kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);
+ td->td_frame = framep;
}
- if ((flags & TDF_KQTICKLED) != 0)
- kqueue_drain_schedtask();
+ CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
+ td->td_proc->p_comm);
+ KASSERT(framep == NULL || TRAPF_USERMODE(framep),
+ ("ast in kernel mode"));
+ WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
+ mtx_assert(&Giant, MA_NOTOWNED);
+ THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
/*
- * Handle deferred update of the fast sigblock value, after
- * the postsig() loop was performed.
+ * This updates the td_ast for the checks below in one
+ * "atomic" operation with turning off all scheduled ast's.
+ * If another AST is triggered while we are handling the
+ * AST's saved in td_ast, the td_ast is again non-zero and
+ * ast() will be called again.
*/
- sigfastblock_setpend(td, resched_sigs);
-
-#ifdef KTRACE
- KTRUSERRET(td);
-#endif
+ thread_lock(td);
+ td_ast = td->td_ast;
+ td->td_ast = 0;
+ thread_unlock(td);
- /*
- * We need to check to see if we have to exit or wait due to a
- * single threading requirement or some other STOP condition.
- */
- if (flags & TDF_NEEDSUSPCHK) {
- PROC_LOCK(p);
- thread_suspend_check(0);
- PROC_UNLOCK(p);
+ for (a = 0; a < nitems(ast_entries); a++) {
+ ae = &ast_entries[a];
+ f = ae->ae_f;
+ if (f == NULL)
+ continue;
+ atomic_interrupt_fence();
+
+ bool run = false;
+ if (__predict_false(framep == NULL)) {
+ if ((ae->ae_flags & ASTR_KCLEAR) != 0)
+ run = ast_handler_calc_tdp_run(td, ae);
+ } else {
+ if ((ae->ae_flags & ASTR_UNCOND) != 0)
+ run = true;
+ else if ((ae->ae_flags & ASTR_ASTF_REQUIRED) != 0 &&
+ (td_ast & TDAI(a)) != 0)
+ run = ast_handler_calc_tdp_run(td, ae);
+ }
+ if (run)
+ f(td, td_ast);
}
+}
- if (td->td_pflags & TDP_OLDMASK) {
- td->td_pflags &= ~TDP_OLDMASK;
- kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
- }
+void
+ast_kclear(struct thread *td)
+{
+ ast_handler(td, NULL);
+}
-#ifdef RACCT
- if (__predict_false(racct_enable && p->p_throttled != 0))
- racct_proc_throttled(p);
-#endif
+void
+ast(struct trapframe *framep)
+{
+ struct thread *td;
+ td = curthread;
+ ast_handler(td, framep);
userret(td, framep);
}
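
ast_handler() is the heart of the rework: it snapshots and clears td_ast under the thread lock, then walks ast_entries[] in enum order and runs each handler whose ASTR_ flags match, with framep == NULL selecting the ASTR_KCLEAR subset used by ast_kclear(). A self-contained model of that dispatch decision; the table contents, ordinals, and flag bits are illustrative, and the kernel additionally issues atomic_interrupt_fence() between loading ae_f and the other fields:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ASTR_ASTF_REQUIRED	0x0001
#define ASTR_TDP		0x0002
#define ASTR_KCLEAR		0x0004
#define ASTR_UNCOND		0x0008

enum { TDA_AST, TDA_GEOM, TDA_SIG, TDA_MAX };	/* tiny stand-in table */
#define TDAI(tda)	(1U << (tda))
#define TDP_GEOM	0x0001			/* assumed td_pflags bit */

struct thread {
	unsigned td_ast;
	unsigned td_pflags;
};

struct ast_entry {
	int ae_flags;
	int ae_tdp;
	void (*ae_f)(struct thread *, int);
};

static void run_geom(struct thread *td, int tda) { (void)td; (void)tda; puts("geom"); }
static void run_sig(struct thread *td, int tda) { (void)td; (void)tda; puts("sig"); }

static struct ast_entry ast_entries[TDA_MAX] = {
	[TDA_GEOM] = { ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
	    TDP_GEOM, run_geom },
	[TDA_SIG] = { ASTR_UNCOND, 0, run_sig },
};

static bool
tdp_run(const struct thread *td, const struct ast_entry *ae)
{
	return ((ae->ae_flags & ASTR_TDP) == 0 ||
	    (td->td_pflags & ae->ae_tdp) != 0);
}

static void
ast_handler_model(struct thread *td, bool kclear)
{
	unsigned td_ast;
	int a;

	td_ast = td->td_ast;		/* kernel: under thread_lock */
	td->td_ast = 0;
	for (a = 0; a < TDA_MAX; a++) {
		const struct ast_entry *ae = &ast_entries[a];
		bool run = false;

		if (ae->ae_f == NULL)
			continue;
		if (kclear) {
			if ((ae->ae_flags & ASTR_KCLEAR) != 0)
				run = tdp_run(td, ae);
		} else if ((ae->ae_flags & ASTR_UNCOND) != 0) {
			run = true;
		} else if ((ae->ae_flags & ASTR_ASTF_REQUIRED) != 0 &&
		    (td_ast & TDAI(a)) != 0) {
			run = tdp_run(td, ae);
		}
		if (run)
			ae->ae_f(td, td_ast);
	}
}

int
main(void)
{
	struct thread td = { TDAI(TDA_GEOM), TDP_GEOM };

	ast_handler_model(&td, false);	/* prints "geom" then "sig" */
	return (0);
}

Because any AST scheduled while the handlers run makes td_ast non-zero again, the single-word assembly check shown earlier simply re-enters ast().
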
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1052,9 +1052,7 @@
* usermode and TDP_OLDMASK is cleared, restoring old
* sigmask.
*/
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_SIGSUSPEND);
}
error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
return (error);
@@ -1533,9 +1531,7 @@
* usermode and TDP_OLDMASK is cleared, restoring old
* sigmask.
*/
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_SIGSUSPEND);
}
seltdinit(td);
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -1072,9 +1072,7 @@
CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
p->p_pid);
td2->td_dbgflags |= TDB_SUSPEND;
- thread_lock(td2);
- td2->td_flags |= TDF_NEEDSUSPCHK;
- thread_unlock(td2);
+ ast_sched(td2, TDA_SUSPEND);
break;
case PT_RESUME:
diff --git a/sys/kern/vfs_mountroot.c b/sys/kern/vfs_mountroot.c
--- a/sys/kern/vfs_mountroot.c
+++ b/sys/kern/vfs_mountroot.c
@@ -978,6 +978,7 @@
vfs_mountroot_wait(void)
{
struct root_hold_token *h;
+ struct thread *td;
struct timeval lastfail;
int curfail;
@@ -986,8 +987,9 @@
curfail = 0;
lastfail.tv_sec = 0;
ppsratecheck(&lastfail, &curfail, 1);
+ td = curthread;
while (1) {
- g_waitidle();
+ g_waitidle(td);
mtx_lock(&root_holds_mtx);
if (TAILQ_EMPTY(&root_holds)) {
mtx_unlock(&root_holds_mtx);
@@ -1004,7 +1006,7 @@
hz);
TSUNWAIT("root mount");
}
- g_waitidle();
+ g_waitidle(td);
TSEXIT();
}
@@ -1030,7 +1032,7 @@
* Note that we must wait for GEOM to finish reconfiguring itself,
* eg for geom_part(4) to finish tasting.
*/
- g_waitidle();
+ g_waitidle(curthread);
if (parse_mount_dev_present(dev))
return (0);
diff --git a/sys/security/mac_lomac/mac_lomac.c b/sys/security/mac_lomac/mac_lomac.c
--- a/sys/security/mac_lomac/mac_lomac.c
+++ b/sys/security/mac_lomac/mac_lomac.c
@@ -532,9 +532,7 @@
subj->mac_lomac.ml_rangelow = objlabel->ml_single;
subj->mac_lomac.ml_rangehigh = objlabel->ml_single;
subj->mac_lomac.ml_flags |= MAC_LOMAC_FLAG_UPDATE;
- thread_lock(curthread);
- curthread->td_flags |= TDF_ASTPENDING | TDF_MACPEND;
- thread_unlock(curthread);
+ ast_sched(curthread, TDA_MAC);
/*
* Avoid memory allocation while holding a mutex; cache the label.
@@ -594,13 +592,25 @@
}
}
+static void
+ast_mac(struct thread *td, int tda __unused)
+{
+ mac_thread_userret(td);
+}
+
/*
* Policy module operations.
*/
static void
-lomac_init(struct mac_policy_conf *conf)
+lomac_init(struct mac_policy_conf *conf __unused)
{
+ ast_register(TDA_MAC, ASTR_ASTF_REQUIRED, 0, ast_mac);
+}
+static void
+lomac_fini(struct mac_policy_conf *conf __unused)
+{
+ ast_deregister(TDA_MAC);
}
/*
@@ -2898,6 +2908,7 @@
static struct mac_policy_ops lomac_ops =
{
.mpo_init = lomac_init,
+ .mpo_destroy = lomac_fini,
.mpo_bpfdesc_check_receive = lomac_bpfdesc_check_receive,
.mpo_bpfdesc_create = lomac_bpfdesc_create,
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -263,6 +263,7 @@
/* Cleared during fork1() */
#define td_startzero td_flags
int td_flags; /* (t) TDF_* flags. */
+ int td_ast; /* (t) TDA_* indicators */
int td_inhibitors; /* (t) Why can not run. */
int td_pflags; /* (k) Private thread (TDP_*) flags. */
int td_pflags2; /* (k) Private thread (TDP2_*) flags. */
@@ -457,13 +458,13 @@
#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
-#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
-#define TDF_KQTICKLED 0x00001000 /* AST drain kqueue taskqueue */
+#define TDF_UNUSED1 0x00000800 /* Available */
+#define TDF_UNUSED2 0x00001000 /* Available */
#define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */
#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
-#define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */
-#define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
-#define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */
+#define TDF_UNUSED3 0x00008000 /* Available */
+#define TDF_UNUSED4 0x00010000 /* Available */
+#define TDF_UNUSED5 0x00020000 /* Available */
#define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */
#define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
@@ -474,9 +475,36 @@
#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
-#define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */
-#define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */
-#define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */
+#define TDF_UNUSED6 0x10000000 /* Available */
+#define TDF_UNUSED7 0x20000000 /* Available */
+#define TDF_UNUSED8 0x40000000 /* Available */
+#define TDF_UNUSED9 0x80000000 /* Available */
+
+enum {
+ TDA_AST = 0, /* Special: call all non-flagged AST handlers */
+ TDA_OWEUPC,
+ TDA_HWPMC,
+ TDA_VFORK,
+ TDA_ALRM,
+ TDA_PROF,
+ TDA_MAC,
+ TDA_SCHED,
+ TDA_UFS,
+ TDA_GEOM,
+ TDA_KQUEUE,
+ TDA_RACCT,
+ TDA_MOD1, /* For third party use, before signals are */
+ TDA_MOD2, /* processed .. */
+ TDA_SIG,
+ TDA_KTRACE,
+ TDA_SUSPEND,
+ TDA_SIGSUSPEND,
+ TDA_MOD3, /* .. and after */
+ TDA_MOD4,
+ TDA_MAX,
+};
+#define TDAI(tda) (1U << (tda))
+#define TDAI2(tda1, tda2) (TDAI(tda1) | TDAI(tda2))
/* Userland debug flags */
#define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */
@@ -1111,7 +1139,23 @@
int pget(pid_t pid, int flags, struct proc **pp);
+/* ast_register() flags */
+#define ASTR_ASTF_REQUIRED 0x0001 /* td_ast TDAI(TDA_X) flag set is
+ required for call */
+#define ASTR_TDP 0x0002 /* td_pflags flag set is required */
+#define ASTR_KCLEAR 0x0004 /* call me on ast_kclear() */
+#define ASTR_UNCOND 0x0008 /* call me always */
+
void ast(struct trapframe *framep);
+void ast_kclear(struct thread *td);
+void ast_register(int ast, int ast_flags, int tdp,
+ void (*f)(struct thread *td, int asts));
+void ast_deregister(int tda);
+void ast_sched_locked(struct thread *td, int tda);
+void ast_sched_mask(struct thread *td, int ast);
+void ast_sched(struct thread *td, int tda);
+void ast_unsched_locked(struct thread *td, int tda);
+
struct thread *choosethread(void);
int cr_cansee(struct ucred *u1, struct ucred *u2);
int cr_canseesocket(struct ucred *cred, struct socket *so);
@@ -1124,7 +1168,6 @@
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
void faultin(struct proc *p);
int fork1(struct thread *, struct fork_req *);
-void fork_rfppwait(struct thread *);
void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
@@ -1296,15 +1339,6 @@
return ((struct td_sched *)&td[1]);
}
-extern void (*softdep_ast_cleanup)(struct thread *);
-static __inline void
-td_softdep_cleanup(struct thread *td)
-{
-
- if (td->td_su != NULL && softdep_ast_cleanup != NULL)
- softdep_ast_cleanup(td);
-}
-
#define PROC_ID_PID 0
#define PROC_ID_GROUP 1
#define PROC_ID_SESSION 2
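
Because ast_handler() iterates ast_entries[] by index, the enum order above is also the handler execution order: the TDA_MOD1/TDA_MOD2 slots run before signal delivery (TDA_SIG) and TDA_MOD3/TDA_MOD4 after it. A tiny compile-time demonstration of the TDAI()/TDAI2() ordinal-to-bit mapping (the asserts themselves are illustrative):

#include <assert.h>

#define TDAI(tda)	(1U << (tda))
#define TDAI2(tda1, tda2)	(TDAI(tda1) | TDAI(tda2))

enum { TDA_AST = 0, TDA_OWEUPC, TDA_HWPMC, TDA_VFORK };

static_assert(TDAI(TDA_AST) == 0x1, "ordinal 0 -> bit 0");
static_assert(TDAI(TDA_VFORK) == 0x8, "ordinal 3 -> bit 3");
static_assert(TDAI2(TDA_OWEUPC, TDA_HWPMC) == 0x6, "bits 1 and 2");

int
main(void)
{
	return (0);
}
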
diff --git a/sys/sys/racct.h b/sys/sys/racct.h
--- a/sys/sys/racct.h
+++ b/sys/sys/racct.h
@@ -196,7 +196,6 @@
void racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
struct ucred *newcred);
void racct_move(struct racct *dest, struct racct *src);
-void racct_proc_throttled(struct proc *p);
void racct_proc_throttle(struct proc *p, int timeout);
#else
diff --git a/sys/sys/signalvar.h b/sys/sys/signalvar.h
--- a/sys/sys/signalvar.h
+++ b/sys/sys/signalvar.h
@@ -272,7 +272,6 @@
#endif
#ifdef _KERNEL
-extern sigset_t fastblock_mask;
extern bool sigfastblock_fetch_always;
/* Return nonzero if process p has an unmasked pending signal. */
@@ -406,7 +405,6 @@
int sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **);
void sigfastblock_clear(struct thread *td);
void sigfastblock_fetch(struct thread *td);
-void sigfastblock_setpend(struct thread *td, bool resched);
int sig_intr(void);
void siginit(struct proc *p);
void signotify(struct thread *td);
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -150,7 +150,6 @@
void *phashinit(int count, struct malloc_type *type, u_long *nentries);
void *phashinit_flags(int count, struct malloc_type *type, u_long *nentries,
int flags);
-void g_waitidle(void);
void cpu_flush_dcache(void *, size_t);
void cpu_rootconf(void);
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -869,7 +869,7 @@
static int request_cleanup(struct mount *, int);
static int softdep_request_cleanup_flush(struct mount *, struct ufsmount *);
static void schedule_cleanup(struct mount *);
-static void softdep_ast_cleanup_proc(struct thread *);
+static void softdep_ast_cleanup_proc(struct thread *, int);
static struct ufsmount *softdep_bp_to_mp(struct buf *bp);
static int process_worklist_item(struct mount *, int, int);
static void process_removes(struct vnode *);
@@ -2546,7 +2546,8 @@
bioops.io_complete = softdep_disk_write_complete;
bioops.io_deallocate = softdep_deallocate_dependencies;
bioops.io_countdeps = softdep_count_dependencies;
- softdep_ast_cleanup = softdep_ast_cleanup_proc;
+ ast_register(TDA_UFS, ASTR_KCLEAR | ASTR_ASTF_REQUIRED, 0,
+ softdep_ast_cleanup_proc);
/* Initialize the callout with an mtx. */
callout_init_mtx(&softdep_callout, &lk, 0);
@@ -2565,7 +2566,7 @@
bioops.io_complete = NULL;
bioops.io_deallocate = NULL;
bioops.io_countdeps = NULL;
- softdep_ast_cleanup = NULL;
+ ast_deregister(TDA_UFS);
callout_drain(&softdep_callout);
}
@@ -13818,13 +13819,11 @@
vfs_rel(td->td_su);
vfs_ref(mp);
td->td_su = mp;
- thread_lock(td);
- td->td_flags |= TDF_ASTPENDING;
- thread_unlock(td);
+ ast_sched(td, TDA_UFS);
}
static void
-softdep_ast_cleanup_proc(struct thread *td)
+softdep_ast_cleanup_proc(struct thread *td, int ast __unused)
{
struct mount *mp;
struct ufsmount *ump;
