Index: sys/amd64/amd64/stack_machdep.c
===================================================================
--- sys/amd64/amd64/stack_machdep.c
+++ sys/amd64/amd64/stack_machdep.c
@@ -33,6 +33,7 @@
 #include
 #include
 
+#include
 #include
 #include
 
@@ -78,6 +79,18 @@
 }
 
 void
+stack_save_inpanic(struct stack *st, struct thread *td)
+{
+#ifdef SMP
+	const struct pcb *pcb = &stoppcbs[td->td_oncpu];
+#else
+	const struct pcb *pcb = td->td_pcb;
+#endif
+
+	stack_capture(st, pcb->pcb_rbp);
+}
+
+void
 stack_save(struct stack *st)
 {
 	register_t rbp;
Index: sys/kern/kern_clock.c
===================================================================
--- sys/kern/kern_clock.c
+++ sys/kern/kern_clock.c
@@ -41,6 +41,7 @@
 #include "opt_device_polling.h"
 #include "opt_hwpmc_hooks.h"
 #include "opt_ntp.h"
+#include "opt_stack.h"
 #include "opt_watchdog.h"
 
 #include
@@ -69,6 +70,10 @@
 #include
 #include
 
+#ifdef STACK
+#include
+#endif
+
 #ifdef GPROF
 #include
 #endif
@@ -563,6 +568,13 @@
 	}
 #endif /* SW_WATCHDOG */
 	}
+
+#ifdef STACK
+	/* Call into the stack capture code if it has pending requests. */
+	if (__predict_false(stack_hardclock_interested))
+		stack_hardclock();
+#endif /* STACK */
+
 	if (curcpu == CPU_FIRST())
 		cpu_tick_calibration();
 }
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -2398,18 +2398,18 @@
 		bzero(kkstp, sizeof(*kkstp));
 		(void)sbuf_new(&sb, kkstp->kkst_trace,
 		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
-		thread_lock(td);
 		kkstp->kkst_tid = td->td_tid;
-		if (TD_IS_SWAPPED(td))
+		PROC_UNLOCK(p);
+		error = stack_save_thread(st, td);
+		if (error == EFAULT)
 			kkstp->kkst_state = KKST_STATE_SWAPPED;
-		else if (TD_IS_RUNNING(td))
-			kkstp->kkst_state = KKST_STATE_RUNNING;
-		else {
+		else if (error == 0)
 			kkstp->kkst_state = KKST_STATE_STACKOK;
-			stack_save_td(st, td);
-		}
-		thread_unlock(td);
-		PROC_UNLOCK(p);
+		else
+			KASSERT(0, ("unexpected error %d", error));
+		/* Skip the thread if it was recycled while unlocked. */
+		if (td->td_proc != p || td->td_tid != kkstp->kkst_tid)
+			continue;
 		stack_sbuf_print(&sb, st);
 		sbuf_finish(&sb);
 		sbuf_delete(&sb);
Index: sys/kern/subr_stack.c
===================================================================
--- sys/kern/subr_stack.c
+++ sys/kern/subr_stack.c
@@ -35,7 +35,10 @@
 #include
 #endif
 #include
+#include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -275,3 +278,151 @@
 	*name = "??";
 	return (ENOENT);
 }
+
+struct stack_save_context {
+	TAILQ_ENTRY(stack_save_context) sc_entry;
+	struct thread *sc_td;
+	struct stack *sc_st;
+	unsigned int sc_hardclock_cpuid;
+	uint8_t sc_hardclock_processed;
+
+	/*
+	 * Capturing a running thread's stack relies on the hardclock
+	 * interrupt, so a scheduled capture attempt may miss.
+	 */
+	uint8_t sc_done;
+};
+
+/*
+ * When this is set, hardclock() calls stack_hardclock().
+ */
+int stack_hardclock_interested = 0;
+
+/*
+ * A list of stack requests to be handled by stack_hardclock().
+ */
+static TAILQ_HEAD(, stack_save_context) stack_hardclock_list =
+    TAILQ_HEAD_INITIALIZER(stack_hardclock_list);
+
+static struct mtx stack_hardclock_lock;
+MTX_SYSINIT(stack_hardclock_lock, &stack_hardclock_lock,
+    "stack_hardclock_lock", MTX_SPIN);
+
+/*
+ * Process the list entries targeting the current CPU.  If the thread
+ * whose stack is requested is running on this CPU, capture its stack.
+ * In any case, remove this CPU's entries from the list and wake up
+ * the corresponding stack_save_running() callers.
+ */
+void
+stack_hardclock(void)
+{
+	struct stack_save_context *ctx, *ctx_next;
+	const unsigned int cpuid = PCPU_GET(cpuid);
+	struct thread *td = curthread;
+
+	mtx_lock_spin(&stack_hardclock_lock);
+
+	TAILQ_FOREACH_SAFE(ctx, &stack_hardclock_list, sc_entry, ctx_next) {
+		if (ctx->sc_hardclock_cpuid != cpuid)
+			continue;
+
+		TAILQ_REMOVE(&stack_hardclock_list, ctx, sc_entry);
+		ctx->sc_hardclock_processed = 1;
+		if (ctx->sc_td == td) {
+			stack_save(ctx->sc_st);
+			ctx->sc_done = 1;
+		}
+		wakeup(ctx);
+	}
+
+	stack_hardclock_interested = !TAILQ_EMPTY(&stack_hardclock_list);
+
+	mtx_unlock_spin(&stack_hardclock_lock);
+}
+
+/*
+ * If the thread is running on another CPU, add a request to the
+ * hardclock list, to be processed by that CPU's next clock interrupt.
+ * Return nonzero if the stack was captured, zero otherwise.
+ */
+static int
+stack_save_running(struct stack *st, struct thread *td)
+{
+	struct stack_save_context ctx;
+
+	bzero(&ctx, sizeof(ctx));
+	ctx.sc_td = td;
+	ctx.sc_st = st;
+	ctx.sc_hardclock_cpuid = td->td_oncpu;
+	ctx.sc_hardclock_processed = 0;
+	thread_unlock(td);
+
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+	    "stack_save_running(td other than curthread)");
+
+	mtx_lock_spin(&stack_hardclock_lock);
+	TAILQ_INSERT_TAIL(&stack_hardclock_list, &ctx, sc_entry);
+	stack_hardclock_interested = 1;
+	while (!ctx.sc_hardclock_processed) {
+		/*
+		 * If the system is panicking, the clock interrupt
+		 * has been disabled.
+		 */
+		if (panicstr) {
+			TAILQ_REMOVE(&stack_hardclock_list, &ctx, sc_entry);
+			break;
+		}
+		msleep_spin(&ctx, &stack_hardclock_lock, __func__, 1);
+	}
+	mtx_unlock_spin(&stack_hardclock_lock);
+
+	return (ctx.sc_done);
+}
+
+/*
+ * stack_save_thread:
+ *
+ * Save the stack of the specified thread.
+ *
+ * Return values:
+ *	0	- success
+ *	EFAULT	- the thread is swapped out
+ */
+int
+stack_save_thread(struct stack *st, struct thread *td)
+{
+
+	mtx_assert(td->td_lock, MA_NOTOWNED);
+
+	if (td == curthread) {
+		stack_save(st);
+		return (0);
+	}
+
+retry:
+	thread_lock(td);
+	if (TD_IS_RUNNING(td)) {
+		if (panicstr) {
+			stack_save_inpanic(st, td);
+			thread_unlock(td);
+			return (0);
+		}
+
+		/*
+		 * stack_save_running() drops the thread lock.
+		 */
+		if (!stack_save_running(st, td))
+			goto retry;
+		return (0);
+	}
+	if (TD_IS_SWAPPED(td)) {
+		thread_unlock(td);
+		return (EFAULT);
+	}
+
+	stack_save_td(st, td);
+	thread_unlock(td);
+
+	return (0);
+}
Index: sys/sys/stack.h
===================================================================
--- sys/sys/stack.h
+++ sys/sys/stack.h
@@ -61,4 +61,13 @@
 void		stack_save(struct stack *);
 void		stack_save_td(struct stack *, struct thread *);
 
+/*
+ * The interface to hardclock().
+ */
+extern int	stack_hardclock_interested;
+void		stack_hardclock(void);
+
+int		stack_save_thread(struct stack *, struct thread *);
+void		stack_save_inpanic(struct stack *, struct thread *);
+
 #endif
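
For reviewers, a minimal sketch (not part of the patch) of how a consumer might
drive the new stack_save_thread() interface.  The function examine_thread() is
hypothetical; stack_create(), stack_print(), and stack_destroy() are the
existing stack(9) helpers, and the locking expectations follow the comments in
the patch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/stack.h>

/*
 * Hypothetical caller, for illustration only: capture and print the
 * stack of an arbitrary thread.
 */
static void
examine_thread(struct thread *td)
{
	struct stack *st;
	int error;

	st = stack_create();

	/*
	 * stack_save_thread() must be called without the thread lock
	 * held; it internally handles the curthread, running,
	 * swapped-out, and in-panic cases.
	 */
	error = stack_save_thread(st, td);
	if (error == EFAULT)
		printf("tid %d is swapped out; no stack available\n",
		    td->td_tid);
	else if (error == 0)
		stack_print(st);	/* Dump the trace to the console. */

	stack_destroy(st);
}

Unlike the old pattern in kern_proc.c, which simply reported running threads as
KKST_STATE_RUNNING, a caller written this way gets a real trace for on-CPU
threads at the cost of potentially sleeping while the hardclock list is
serviced.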