diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1761,8 +1761,11 @@
  * Walks the page tables to translate a kernel virtual address to a
  * physical address. Returns true if the kva is valid and stores the
  * physical address in pa if it is not NULL.
+ *
+ * See the comment above data_abort() for the rationale for specifying
+ * NO_PERTHREAD_SSP here.
  */
-bool
+bool NO_PERTHREAD_SSP
 pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
 {
 	pt_entry_t *pte, tpte;
@@ -7168,10 +7171,6 @@
 
 	/* Store the new curthread */
 	PCPU_SET(curthread, new);
-#if defined(PERTHREAD_SSP)
-	/* Set the new threads SSP canary */
-	__asm("msr sp_el0, %0" :: "r"(&new->td_md.md_canary));
-#endif
 
 	/* And the new pcb */
 	pcb = new->td_pcb;
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -80,9 +80,17 @@
 
 	/* This returns the thread pointer so no need to save it */
 	bl	ptrauth_switch
+#ifdef PERTHREAD_SSP
+	mov	x19, x0
+#endif
 	/* This returns the thread pcb */
 	bl	pmap_switch
 	mov	x4, x0
+#ifdef PERTHREAD_SSP
+	/* Update the per-thread stack canary pointer. */
+	add	x19, x19, #(TD_MD_CANARY)
+	msr	sp_el0, x19
+#endif
 
 	/* If we are single stepping, enable it */
 	ldr	w5, [x4, #PCB_FLAGS]
@@ -159,6 +167,11 @@
 	mov	x2, x21
 	mov	x1, x20
 	mov	x0, x19
+#ifdef PERTHREAD_SSP
+	/* Update the per-thread stack canary pointer. */
+	add	x20, x20, #(TD_MD_CANARY)
+	msr	sp_el0, x20
+#endif
 
 	/*
 	 * Release the old thread.
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -242,7 +242,13 @@
 	panic("Unhandled EL%d external data abort", lower ? 0: 1);
 }
 
-static void
+/*
+ * It is unsafe to access the stack canary value stored in "td" until
+ * kernel map translation faults are handled, see the pmap_klookup() call below.
+ * Thus, stack-smashing detection with per-thread canaries must be disabled in
+ * this function.
+ */
+static void NO_PERTHREAD_SSP
 data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
     uint64_t far, int lower)
 {
@@ -450,7 +456,10 @@
 }
 #endif
 
-void
+/*
+ * See the comment above data_abort().
+ */
+void NO_PERTHREAD_SSP
 do_el1h_sync(struct thread *td, struct trapframe *frame)
 {
 	uint32_t exception;
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
--- a/sys/arm64/include/param.h
+++ b/sys/arm64/include/param.h
@@ -109,6 +109,12 @@
 #define	KSTACK_GUARD_PAGES	1	/* pages of kstack guard; 0 disables */
 #define	PCPU_PAGES	1
 
+#ifdef PERTHREAD_SSP
+#define	NO_PERTHREAD_SSP	__nostackprotector
+#else
+#define	NO_PERTHREAD_SSP
+#endif
+
 /*
  * Mach derived conversion macros
  */
diff --git a/sys/sys/cdefs.h b/sys/sys/cdefs.h
--- a/sys/sys/cdefs.h
+++ b/sys/sys/cdefs.h
@@ -896,6 +896,16 @@
 #define	__nosanitizethread
 #endif
 
+/*
+ * Make it possible to opt out of stack smashing protection.
+ */
+#if __has_attribute(no_stack_protector)
+#define	__nostackprotector	__attribute__((no_stack_protector))
+#else
+#define	__nostackprotector \
+	__attribute__((__optimize__("-fno-stack-protector")))
+#endif
+
 /* Guard variables and structure members by lock. */
 #define	__guarded_by(x)		__lock_annotate(guarded_by(x))
 #define	__pt_guarded_by(x)	__lock_annotate(pt_guarded_by(x))
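
Not part of the patch: a minimal standalone sketch of the opt-out mechanism the cdefs.h and param.h hunks above introduce, assuming a compiler invoked with -fstack-protector-all. The function name copy_no_canary is hypothetical and exists only to show where the attribute suppresses canary generation, mirroring how data_abort() and pmap_klookup() must run before the per-thread canary's backing storage is guaranteed to be mapped.

/*
 * Illustrative sketch only (not part of the patch).  Mirrors the
 * __nostackprotector definition added to sys/sys/cdefs.h; the function
 * below is hypothetical.  Build with: cc -fstack-protector-all -c sketch.c
 */
#if __has_attribute(no_stack_protector)
#define	__nostackprotector	__attribute__((no_stack_protector))
#else
#define	__nostackprotector \
	__attribute__((__optimize__("-fno-stack-protector")))
#endif

#include <string.h>

/*
 * The compiler emits no canary store or check for this function, so it
 * remains safe to call while the canary's backing storage could still
 * trigger a translation fault.
 */
int __nostackprotector
copy_no_canary(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return (0);
}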