Index: sys/arm64/arm64/db_trace.c
===================================================================
--- sys/arm64/arm64/db_trace.c
+++ sys/arm64/arm64/db_trace.c
@@ -123,7 +123,7 @@
 			}
 
 			frame->fp = tf->tf_x[29];
-			frame->pc = tf->tf_elr;
+			frame->pc = ADDR_MAKE_CANONICAL(tf->tf_elr);
 			if (!INKERNEL(frame->fp))
 				break;
 		} else {
Index: sys/arm64/arm64/exception.S
===================================================================
--- sys/arm64/arm64/exception.S
+++ sys/arm64/arm64/exception.S
@@ -73,6 +73,9 @@
 	blr	x1
 1:
 
+	ldr	x0, [x18, #PC_CURTHREAD]
+	bl	ptrauth_exit_el0
+
 	ldr	x0, [x18, #(PC_CURTHREAD)]
 	bl	dbg_monitor_enter
 	msr	daifclr, #8	/* Enable the debug exception */
@@ -97,6 +100,9 @@
 	mov	x1, sp
 	bl	dbg_monitor_exit
 
+	ldr	x0, [x18, #PC_CURTHREAD]
+	bl	ptrauth_enter_el0
+
 	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
 	ldr	x1, [x18, #PC_SSBD]
 	cbz	x1, 1f
Index: sys/arm64/arm64/identcpu.c
===================================================================
--- sys/arm64/arm64/identcpu.c
+++ sys/arm64/arm64/identcpu.c
@@ -653,11 +653,21 @@
 	MRS_FIELD_VALUE_END,
 };
 
+static struct mrs_field_hwcap id_aa64isar1_gpi_caps[] = {
+	MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPI_IMPL),
+	MRS_HWCAP_END
+};
+
 static struct mrs_field_value id_aa64isar1_gpa[] = {
 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
 	MRS_FIELD_VALUE_END,
 };
 
+static struct mrs_field_hwcap id_aa64isar1_gpa_caps[] = {
+	MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPA_IMPL),
+	MRS_HWCAP_END
+};
+
 static struct mrs_field_value id_aa64isar1_lrcpc[] = {
 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
@@ -698,6 +708,11 @@
 	MRS_FIELD_VALUE_END,
 };
 
+static struct mrs_field_hwcap id_aa64isar1_api_caps[] = {
+	MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_API_PAC),
+	MRS_HWCAP_END
+};
+
 static struct mrs_field_value id_aa64isar1_apa[] = {
 	MRS_FIELD_VALUE(ID_AA64ISAR1_APA_NONE, ""),
 	MRS_FIELD_VALUE(ID_AA64ISAR1_APA_PAC, "APA PAC"),
@@ -705,6 +720,11 @@
 	MRS_FIELD_VALUE_END,
 };
 
+static struct mrs_field_hwcap id_aa64isar1_apa_caps[] = {
+	MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_APA_PAC),
+	MRS_HWCAP_END
+};
+
 static struct mrs_field_value id_aa64isar1_dpb[] = {
 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
@@ -731,16 +751,20 @@
 	    id_aa64isar1_sb_caps),
 	MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
 	    id_aa64isar1_frintts, id_aa64isar1_frintts_caps),
-	MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
-	MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
+	MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi,
+	    id_aa64isar1_gpi_caps),
+	MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa,
+	    id_aa64isar1_gpa_caps),
 	MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER,
 	    id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps),
 	MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER,
 	    id_aa64isar1_fcma, id_aa64isar1_fcma_caps),
 	MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER,
 	    id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps),
-	MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
-	MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
+	MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api,
+	    id_aa64isar1_api_caps),
+	MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa,
+	    id_aa64isar1_apa_caps),
 	MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER,
 	    id_aa64isar1_dpb, id_aa64isar1_dpb_caps),
 	MRS_FIELD_END,
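
With GPA/GPI and APA/API now reported through HWCAP_PACG and HWCAP_PACA, userland can detect the feature at run time. A minimal sketch of such a probe, assuming the standard elf_aux_info(3) interface and the HWCAP_* definitions from machine/elf.h (not part of the patch):

#include <sys/types.h>
#include <sys/auxv.h>

#include <machine/elf.h>

#include <stdio.h>

int
main(void)
{
	u_long hwcap = 0;

	/* elf_aux_info(3) copies the AT_HWCAP auxv entry for this process. */
	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0) {
		fprintf(stderr, "no AT_HWCAP\n");
		return (1);
	}

	printf("address auth (PACA): %s\n",
	    (hwcap & HWCAP_PACA) != 0 ? "yes" : "no");
	printf("generic auth (PACG): %s\n",
	    (hwcap & HWCAP_PACG) != 0 ? "yes" : "no");
	return (0);
}
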
Index: sys/arm64/arm64/locore.S
===================================================================
--- sys/arm64/arm64/locore.S
+++ sys/arm64/arm64/locore.S
@@ -145,6 +145,16 @@
 	bl	initarm
 	/* We are done with the boot params */
 	add	sp, sp, #BOOTPARAMS_SIZE
+
+	/*
+	 * Enable pointer authentication in the kernel. We set the keys for
+	 * thread0 in initarm so we have to wait until it returns before
+	 * enabling it. If we were to enable it in initarm then any
+	 * authentication on the return path would fail, as initarm was
+	 * called with pointer authentication disabled.
+	 */
+	bl	ptrauth_start
+
 	bl	mi_startup
 
 	/* We should not get here */
@@ -225,7 +235,7 @@
 	ret
 1:
 	/* Configure the Hypervisor */
-	mov	x2, #(HCR_RW)
+	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
 	msr	hcr_el2, x2
 
 	/* Load the Virtualization Process ID Register */
Index: sys/arm64/arm64/machdep.c
===================================================================
--- sys/arm64/arm64/machdep.c
+++ sys/arm64/arm64/machdep.c
@@ -579,6 +579,9 @@
 	 * Clear debug register state. It is not applicable to the new process.
 	 */
 	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
+
+	/* Generate new pointer authentication keys */
+	ptrauth_exec(td);
 }
 
 /* Sanity check these are the same size, they will be memcpy'd to and fro */
@@ -930,6 +933,7 @@
 	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
 	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
 	thread0.td_frame = &proc0_tf;
+	ptrauth_thread0(&thread0);
 	pcpup->pc_curpcb = thread0.td_pcb;
 }
 
@@ -1345,6 +1349,13 @@
 		panic("Invalid bus configuration: %s",
 		    kern_getenv("kern.cfg.order"));
 
+	/*
+	 * Check if pointer authentication is available on this system, and
+	 * if so enable its use. This needs to be called before init_proc0
+	 * as that will configure the thread0 pointer authentication keys.
+	 */
+	ptrauth_init();
+
 	/*
 	 * Dump the boot metadata. We have to wait for cninit() since console
 	 * output is required. If it's grossly incorrect the kernel will never
Index: sys/arm64/arm64/mp_machdep.c
===================================================================
--- sys/arm64/arm64/mp_machdep.c
+++ sys/arm64/arm64/mp_machdep.c
@@ -58,6 +58,7 @@
 #include
 #include
+#include <machine/cpu.h>
 #include
 #include
 #include
@@ -208,6 +209,8 @@
 	pmap_t pmap0;
 	u_int mpidr;
 
+	ptrauth_mp_start(cpu);
+
 	/*
	 * Verify that the value passed in 'cpu' argument (aka context_id) is
	 * valid. Some older U-Boot based PSCI implementations are buggy,
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -6529,7 +6529,7 @@
  * to cpu_switch().
  */
 struct pcb *
-pmap_switch(struct thread *old __unused, struct thread *new)
+pmap_switch(struct thread *new)
 {
 	pcpu_bp_harden bp_harden;
 	struct pcb *pcb;
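
ptrauth_init() in the new file below keys off the APA and API fields of ID_AA64ISAR1_EL1. For reference, a standalone sketch of the same check using the architectural field positions (APA is bits [7:4], API bits [11:8]) instead of the kernel's ID_AA64ISAR1_*_VAL() accessors; has_address_auth is a hypothetical name:

#include <stdbool.h>
#include <stdint.h>

#define	ISAR1_APA_SHIFT	4	/* architected (QARMA) address auth */
#define	ISAR1_API_SHIFT	8	/* IMPLEMENTATION DEFINED address auth */

static bool
has_address_auth(uint64_t isar1)
{
	uint64_t apa, api;

	apa = (isar1 >> ISAR1_APA_SHIFT) & 0xf;
	api = (isar1 >> ISAR1_API_SHIFT) & 0xf;

	/*
	 * A nonzero value in either field means address authentication
	 * is implemented; which field is set depends on whether the CPU
	 * uses the architected or an implementation-defined algorithm.
	 */
	return (apa > 0 || api > 0);
}
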
Index: sys/arm64/arm64/ptrauth.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/ptrauth.c
@@ -0,0 +1,238 @@
+/*-
+ * Copyright (c) 2021 The FreeBSD Foundation
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/libkern.h>
+#include <sys/proc.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+
+#define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)
+
+static bool __read_mostly enable_ptrauth = false;
+
+/* Functions called from assembly. */
+void ptrauth_start(void);
+struct thread *ptrauth_switch(struct thread *);
+void ptrauth_exit_el0(struct thread *);
+void ptrauth_enter_el0(struct thread *);
+
+void
+ptrauth_init(void)
+{
+	uint64_t isar1;
+
+	get_kernel_reg(ID_AA64ISAR1_EL1, &isar1);
+
+	/*
+	 * This assumes that if there is pointer authentication on the boot
+	 * CPU it will also be available on any non-boot CPUs. If this is
+	 * ever not the case we will have to add a quirk.
+	 */
+	if (ID_AA64ISAR1_APA_VAL(isar1) > 0 || ID_AA64ISAR1_API_VAL(isar1) > 0)
+		enable_ptrauth = true;
+}
+
+/* Copy the keys when forking a new process */
+void
+ptrauth_fork(struct thread *new_td, struct thread *orig_td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
+	    sizeof(new_td->td_md.md_ptrauth_user));
+}
+
+/* Generate new userspace keys when executing a new process */
+void
+ptrauth_exec(struct thread *td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
+	    0);
+}
+
+/*
+ * Copy the user keys when creating a new userspace thread until it's clear
+ * how the ABI expects the various keys to be assigned.
+ */
+void
+ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
+	    sizeof(new_td->td_md.md_ptrauth_user));
+}
+
+/* Generate new in-kernel keys when allocating a new thread */
+void
+ptrauth_thread_alloc(struct thread *td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
+	    0);
+}
+
+/*
+ * Load a set of pointer authentication keys. We can't use WRITE_SPECIALREG
+ * as we need to set the architecture extension before accessing the key
+ * registers.
+ */
+#define	LOAD_KEY(space, name)					\
+__asm __volatile(						\
+	".arch_extension pauth			\n"		\
+	"msr	"#name"keylo_el1, %0		\n"		\
+	"msr	"#name"keyhi_el1, %1		\n"		\
+	".arch_extension nopauth		\n"		\
+	:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),	\
+	   "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
+
+void
+ptrauth_thread0(struct thread *td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	/* TODO: Generate a random number here */
+	memset(&td->td_md.md_ptrauth_kern, 0,
+	    sizeof(td->td_md.md_ptrauth_kern));
+	LOAD_KEY(kern, apia);
+	/*
+	 * No isb as this is called before ptrauth_start so we can rely on
+	 * the instruction barrier there.
+	 */
+}
+
+/*
+ * Enable pointer authentication. After this point userspace and the kernel
+ * can sign return addresses, etc. based on their keys.
+ *
+ * This assumes either all or no CPUs have pointer authentication support,
+ * and, if supported, all CPUs have the same algorithm.
+ */
+void
+ptrauth_start(void)
+{
+	uint64_t sctlr;
+
+	if (!enable_ptrauth)
+		return;
+
+	/* Enable pointer authentication */
+	sctlr = READ_SPECIALREG(sctlr_el1);
+	sctlr |= SCTLR_PTRAUTH;
+	WRITE_SPECIALREG(sctlr_el1, sctlr);
+	isb();
+}
+
+#ifdef SMP
+void
+ptrauth_mp_start(uint64_t cpu)
+{
+	struct ptrauth_key start_key;
+	uint64_t sctlr;
+
+	if (!enable_ptrauth)
+		return;
+
+	/*
+	 * We need a key until we call sched_throw, however we don't have
+	 * a thread until then. Create a key just for use within
+	 * init_secondary and whatever it calls. As init_secondary never
+	 * returns it is safe to do so from within it.
+	 *
+	 * As it's only used for a short length of time just use the cpu
+	 * number as the key.
+	 */
+	start_key.pa_key_lo = cpu;
+	start_key.pa_key_hi = ~cpu;
+
+	__asm __volatile(
+	    ".arch_extension pauth		\n"
+	    "msr	apiakeylo_el1, %0	\n"
+	    "msr	apiakeyhi_el1, %1	\n"
+	    ".arch_extension nopauth		\n"
+	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
+
+	/* Enable pointer authentication */
+	sctlr = READ_SPECIALREG(sctlr_el1);
+	sctlr |= SCTLR_PTRAUTH;
+	WRITE_SPECIALREG(sctlr_el1, sctlr);
+	isb();
+}
+#endif
+
+struct thread *
+ptrauth_switch(struct thread *td)
+{
+	if (enable_ptrauth) {
+		LOAD_KEY(kern, apia);
+		isb();
+	}
+
+	return (td);
+}
+
+/* Called when we are exiting userspace and entering the kernel */
+void
+ptrauth_exit_el0(struct thread *td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	LOAD_KEY(kern, apia);
+	isb();
+}
+
+/* Called when we are about to exit the kernel and enter userspace */
+void
+ptrauth_enter_el0(struct thread *td)
+{
+	if (!enable_ptrauth)
+		return;
+
+	LOAD_KEY(user, apia);
+	LOAD_KEY(user, apib);
+	LOAD_KEY(user, apda);
+	LOAD_KEY(user, apdb);
+	LOAD_KEY(user, apga);
+	/*
+	 * No isb as this is called from the exception handler so we can
+	 * rely on the eret instruction to be the needed context
+	 * synchronizing event.
+	 */
+}
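
Once ptrauth_start() sets the SCTLR_EL1 enable bits, the PAC instructions become active instead of behaving as NOPs. A minimal illustration of what the APIA key is then used for — a sketch for PAC-capable hardware, not part of the patch; pac_sign_ia/pac_auth_ia are hypothetical names:

#include <stdint.h>

/* Insert a PAC into the top bits of ptr, keyed by APIA and a modifier. */
static inline uint64_t
pac_sign_ia(uint64_t ptr, uint64_t modifier)
{
	__asm __volatile(
	    ".arch_extension pauth	\n"
	    "pacia	%0, %1		\n"
	    ".arch_extension nopauth	\n"
	    : "+r"(ptr) : "r"(modifier));
	return (ptr);
}

/* Strip the PAC again; on a mismatch the pointer comes back poisoned. */
static inline uint64_t
pac_auth_ia(uint64_t ptr, uint64_t modifier)
{
	__asm __volatile(
	    ".arch_extension pauth	\n"
	    "autia	%0, %1		\n"
	    ".arch_extension nopauth	\n"
	    : "+r"(ptr) : "r"(modifier));
	return (ptr);
}

This mirrors what compiler-generated paciasp/autiasp do with the link register around function prologues and epilogues, which is why the keys must be valid before any signed return executes.
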
Index: sys/arm64/arm64/stack_machdep.c
===================================================================
--- sys/arm64/arm64/stack_machdep.c
+++ sys/arm64/arm64/stack_machdep.c
@@ -70,7 +70,7 @@
 		return (EOPNOTSUPP);
 
 	frame.fp = td->td_pcb->pcb_x[29];
-	frame.pc = td->td_pcb->pcb_lr;
+	frame.pc = ADDR_MAKE_CANONICAL(td->td_pcb->pcb_lr);
 
 	stack_capture(td, st, &frame);
 	return (0);
Index: sys/arm64/arm64/swtch.S
===================================================================
--- sys/arm64/arm64/swtch.S
+++ sys/arm64/arm64/swtch.S
@@ -70,13 +70,16 @@
 
 #ifdef VFP
 	/* Backup the new thread pointer around a call to C code */
-	mov	x19, x0
-	mov	x20, x1
+	mov	x19, x1
 	bl	vfp_discard
-	mov	x1, x20
 	mov	x0, x19
+#else
+	mov	x0, x1
 #endif
 
+	/* This returns the thread pointer so no need to save it */
+	bl	ptrauth_switch
+	/* This returns the thread pcb */
 	bl	pmap_switch
 	mov	x4, x0
@@ -152,10 +155,14 @@
 	/* Load the pcb address */
 	mov	x1, x4
 	bl	vfp_save_state
-	mov	x1, x20
-	mov	x0, x19
+	mov	x0, x20
+#else
+	mov	x0, x1
 #endif
 
+	/* This returns the thread pointer so no need to save it */
+	bl	ptrauth_switch
+	/* This returns the thread pcb */
 	bl	pmap_switch
 	/* Move the new pcb out of the way */
 	mov	x4, x0
@@ -213,6 +220,16 @@
 	mov	fp, #0	/* Stack traceback stops here. */
 	bl	_C_LABEL(fork_exit)
 
+	/*
+	 * Disable interrupts as we are setting userspace-specific
+	 * state that we won't handle correctly in an interrupt while
+	 * in the kernel.
+	 */
+	msr	daifset, #2
+
+	ldr	x0, [x18, #PC_CURTHREAD]
+	bl	ptrauth_enter_el0
+
 	/* Restore the registers other than x0 and x1 */
 	ldp	x2, x3, [sp, #TF_X + 2 * 8]
 	ldp	x4, x5, [sp, #TF_X + 4 * 8]
@@ -229,12 +246,6 @@
 	ldp	x26, x27, [sp, #TF_X + 26 * 8]
 	ldp	x28, x29, [sp, #TF_X + 28 * 8]
 
-	/*
-	 * Disable interrupts to avoid
-	 * overwriting spsr_el1 and sp_el0 by an IRQ exception.
-	 */
-	msr	daifset, #2
-
 	/* Restore sp and lr */
 	ldp	x0, x1, [sp, #TF_SP]
 	msr	sp_el0, x0
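
Both cpu_throw and cpu_switch now make the same pair of calls, so the ordering is easiest to read as C. A rough C-level equivalent of the assembly sequence above (switch_md_context is a hypothetical name; the real code simply keeps the thread pointer in x0 across the calls):

struct thread;
struct pcb;

/*
 * Kernel functions called from swtch.S. ptrauth_switch() returns its
 * argument and pmap_switch() returns the pcb, so the assembly needs no
 * extra register saves around them.
 */
struct thread *ptrauth_switch(struct thread *);
struct pcb *pmap_switch(struct thread *);

static struct pcb *
switch_md_context(struct thread *new)
{
	/*
	 * Install the incoming thread's kernel APIA key first; any
	 * authenticated return on the new thread's kernel stack must
	 * be checked against its key.
	 */
	new = ptrauth_switch(new);

	/* Then switch the address space and pick up the new pcb. */
	return (pmap_switch(new));
}
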
Index: sys/arm64/arm64/trap.c
===================================================================
--- sys/arm64/arm64/trap.c
+++ sys/arm64/arm64/trap.c
@@ -453,6 +453,12 @@
 		panic("No debugger in kernel.");
 #endif
 		break;
+	case EXCP_FPAC:
+		/* We can see this if pointer authentication fails */
+		print_registers(frame);
+		printf(" far: %16lx\n", READ_SPECIALREG(far_el1));
+		panic("FPAC kernel exception");
+		break;
 	case EXCP_UNKNOWN:
 		if (undef_insn(1, frame))
 			break;
@@ -543,6 +549,11 @@
 		    exception);
 		userret(td, frame);
 		break;
+	case EXCP_FPAC:
+		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
+		    exception);
+		userret(td, frame);
+		break;
 	case EXCP_SP_ALIGN:
 		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
 		    exception);
Index: sys/arm64/arm64/unwind.c
===================================================================
--- sys/arm64/arm64/unwind.c
+++ sys/arm64/arm64/unwind.c
@@ -48,7 +48,7 @@
 	/* FP to previous frame (X29) */
 	frame->fp = ((uintptr_t *)fp)[0];
 	/* LR (X30) */
-	frame->pc = ((uintptr_t *)fp)[1] - 4;
+	frame->pc = ADDR_MAKE_CANONICAL(((uintptr_t *)fp)[1] - 4);
 
 	return (true);
 }
Index: sys/arm64/arm64/vm_machdep.c
===================================================================
--- sys/arm64/arm64/vm_machdep.c
+++ sys/arm64/arm64/vm_machdep.c
@@ -94,6 +94,8 @@
 	/* Clear the debug register state. */
 	bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs));
 
+	ptrauth_fork(td2, td1);
+
 	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
 	bcopy(td1->td_frame, tf, sizeof(*tf));
 	tf->tf_x[0] = 0;
@@ -187,6 +189,9 @@
 	/* Setup to release spin count in fork_exit(). */
 	td->td_md.md_spinlock_count = 1;
 	td->td_md.md_saved_daif = td0->td_md.md_saved_daif & ~DAIF_I_MASKED;
+
+	/* Copy the pointer authentication keys from the old thread. */
+	ptrauth_copy_thread(td, td0);
 }
 
 /*
@@ -247,6 +252,7 @@
 	    td->td_kstack_pages * PAGE_SIZE) - 1;
 	td->td_frame = (struct trapframe *)STACKALIGN(
 	    (struct trapframe *)td->td_pcb - 1);
+	ptrauth_thread_alloc(td);
 }
 
 void
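
The new EXCP_FPAC cases are easiest to exercise from userspace: corrupt a signed pointer, then authenticate it. A sketch, not part of the patch; it assumes FEAT_FPAC hardware (on plain FEAT_PAuth the aut instruction instead yields a poisoned pointer that only faults on use) and a 48-bit VA so that bit 53 lies inside the PAC field:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t ptr = (uint64_t)&main;

	/* Sign the pointer with the IA key, using SP as the modifier. */
	__asm __volatile(
	    ".arch_extension pauth	\n"
	    "pacia	%0, sp		\n"
	    : "+r"(ptr));

	ptr ^= 1UL << 53;	/* Flip a PAC bit to corrupt the signature. */

	/*
	 * Authentication fails here: with FEAT_FPAC this traps, and the
	 * kernel's EXCP_FPAC case above delivers SIGILL/ILL_ILLOPN.
	 */
	__asm __volatile(
	    ".arch_extension pauth	\n"
	    "autia	%0, sp		\n"
	    : "+r"(ptr));

	printf("not reached on FPAC hardware: %#lx\n", ptr);
	return (0);
}
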
Index: sys/arm64/include/armreg.h
===================================================================
--- sys/arm64/include/armreg.h
+++ sys/arm64/include/armreg.h
@@ -222,6 +222,7 @@
 #define	 EXCP_SVC64		0x15	/* SVC trap for AArch64 */
 #define	 EXCP_HVC		0x16	/* HVC trap */
 #define	 EXCP_MSR		0x18	/* MSR/MRS trap */
+#define	 EXCP_FPAC		0x1c	/* Faulting PAC trap */
 #define	 EXCP_INSN_ABORT_L	0x20	/* Instruction abort, from lower EL */
 #define	 EXCP_INSN_ABORT	0x21	/* Instruction abort, from same EL */
 #define	 EXCP_PC_ALIGN		0x22	/* PC alignment fault */
Index: sys/arm64/include/cpu.h
===================================================================
--- sys/arm64/include/cpu.h
+++ sys/arm64/include/cpu.h
@@ -171,6 +171,17 @@
 void	install_cpu_errata(void);
 void	swi_vm(void *v);
 
+/* Pointer Authentication Code (PAC) support */
+void	ptrauth_init(void);
+void	ptrauth_fork(struct thread *, struct thread *);
+void	ptrauth_exec(struct thread *);
+void	ptrauth_copy_thread(struct thread *, struct thread *);
+void	ptrauth_thread_alloc(struct thread *);
+void	ptrauth_thread0(struct thread *);
+#ifdef SMP
+void	ptrauth_mp_start(uint64_t);
+#endif
+
 /* Functions to read the sanitised view of the special registers */
 void	update_special_regs(u_int);
 bool	extract_user_id_field(u_int, u_int, uint8_t *);
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -192,7 +192,7 @@
 
 int	pmap_fault(pmap_t, uint64_t, uint64_t);
 
-struct pcb *pmap_switch(struct thread *, struct thread *);
+struct pcb *pmap_switch(struct thread *);
 
 extern void (*pmap_clean_stage2_tlbi)(void);
 extern void (*pmap_invalidate_vpipt_icache)(void);
Index: sys/arm64/include/proc.h
===================================================================
--- sys/arm64/include/proc.h
+++ sys/arm64/include/proc.h
@@ -34,9 +34,34 @@
 #ifndef	_MACHINE_PROC_H_
 #define	_MACHINE_PROC_H_
 
+struct ptrauth_key {
+	uint64_t pa_key_lo;
+	uint64_t pa_key_hi;
+};
+
 struct mdthread {
 	int	md_spinlock_count;	/* (k) */
 	register_t md_saved_daif;	/* (k) */
+
+	/*
+	 * The pointer authentication keys. These are currently shared
+	 * within a process, however this may change for some keys as the
+	 * PAuth ABI Extension to ELF for the Arm 64-bit Architecture [1]
+	 * is (as of July 2021) only at Alpha release quality.
+	 *
+	 * [1] https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst
+	 */
+	struct {
+		struct ptrauth_key apia;
+		struct ptrauth_key apib;
+		struct ptrauth_key apda;
+		struct ptrauth_key apdb;
+		struct ptrauth_key apga;
+	} md_ptrauth_user;
+
+	struct {
+		struct ptrauth_key apia;
+	} md_ptrauth_kern;
 };
 
 struct mdproc {
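
Of the five user keys above, apga is the odd one out: it is not used for pointer signing but for computing a generic MAC with the pacga instruction — this is what HWCAP_PACG advertises. A sketch on PAC-capable hardware (pac_generic_sign is a hypothetical helper, not part of the patch):

#include <stdint.h>

/*
 * Compute a MAC over (data, modifier) with the APGA key. The result
 * carries the PAC in its upper 32 bits; the lower 32 bits are zero.
 */
static inline uint64_t
pac_generic_sign(uint64_t data, uint64_t modifier)
{
	uint64_t mac;

	__asm __volatile(
	    ".arch_extension pauth	\n"
	    "pacga	%0, %1, %2	\n"
	    ".arch_extension nopauth	\n"
	    : "=r"(mac) : "r"(data), "r"(modifier));
	return (mac);
}
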
Index: sys/arm64/include/vmparam.h
===================================================================
--- sys/arm64/include/vmparam.h
+++ sys/arm64/include/vmparam.h
@@ -162,6 +162,15 @@
 #define	ADDR_IS_CANONICAL(addr)	\
     (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
+#define	ADDR_MAKE_CANONICAL(addr) ({			\
+	__typeof(addr) _tmp_addr = (addr);		\
+							\
+	_tmp_addr &= ~0xffff000000000000UL;		\
+	if (ADDR_IS_KERNEL(addr))			\
+		_tmp_addr |= 0xffff000000000000UL;	\
+							\
+	_tmp_addr;					\
+})
 
 /* 95 TiB maximum for the direct map region */
 #define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
Index: sys/conf/files.arm64
===================================================================
--- sys/conf/files.arm64
+++ sys/conf/files.arm64
@@ -66,6 +66,8 @@
 arm64/arm64/mp_machdep.c	optional smp
 arm64/arm64/nexus.c		standard
 arm64/arm64/ofw_machdep.c	optional fdt
+arm64/arm64/ptrauth.c		standard \
+	compile-with "${NORMAL_C:N-mbranch-protection*}"
 arm64/arm64/pmap.c		standard
 arm64/arm64/stack_machdep.c	optional ddb | stack
 arm64/arm64/support.S		standard
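
For reference, the unwinder changes earlier in the patch rely on ADDR_MAKE_CANONICAL stripping a PAC from a saved link register before it is resolved or printed. A userland model of the macro's behaviour — a sketch assuming ADDR_IS_KERNEL tests bit 55, which signing leaves intact so a signed pointer still selects the correct address-space half:

#include <stdint.h>
#include <stdio.h>

#define	TOP_BITS	0xffff000000000000UL

static uint64_t
addr_make_canonical(uint64_t addr)
{
	uint64_t tmp = addr & ~TOP_BITS;

	/* Bit 55 survives signing and selects the kernel (upper) half. */
	if ((addr & (1UL << 55)) != 0)
		tmp |= TOP_BITS;
	return (tmp);
}

int
main(void)
{
	/* An upper-half return address with a made-up PAC in the top bits. */
	uint64_t signed_lr = 0x00b4000000123456UL;

	/* Prints 0xb4000000123456 -> 0xffff000000123456. */
	printf("%#lx -> %#lx\n", signed_lr, addr_make_canonical(signed_lr));
	return (0);
}
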