D31261.id101374.diff

diff --git a/sys/arm64/arm64/db_trace.c b/sys/arm64/arm64/db_trace.c
--- a/sys/arm64/arm64/db_trace.c
+++ b/sys/arm64/arm64/db_trace.c
@@ -122,7 +122,7 @@
}
frame->fp = tf->tf_x[29];
- frame->pc = tf->tf_elr;
+ frame->pc = ADDR_MAKE_CANONICAL(tf->tf_elr);
if (!INKERNEL(frame->fp))
break;
} else {
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -88,6 +88,9 @@
blr x1
1:
+ ldr x0, [x18, #PC_CURTHREAD]
+ bl ptrauth_exit_el0
+
ldr x0, [x18, #(PC_CURTHREAD)]
bl dbg_monitor_enter
@@ -114,6 +117,9 @@
mov x1, sp
bl dbg_monitor_exit
+ ldr x0, [x18, #PC_CURTHREAD]
+ bl ptrauth_enter_el0
+
/* Remove the SSBD (CVE-2018-3639) workaround if needed */
ldr x1, [x18, #PC_SSBD]
cbz x1, 1f
diff --git a/sys/arm64/arm64/exec_machdep.c b/sys/arm64/arm64/exec_machdep.c
--- a/sys/arm64/arm64/exec_machdep.c
+++ b/sys/arm64/arm64/exec_machdep.c
@@ -397,6 +397,9 @@
* Clear debug register state. It is not applicable to the new process.
*/
bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
+
+ /* Generate new pointer authentication keys */
+ ptrauth_exec(td);
}
/* Sanity check these are the same size, they will be memcpy'd to and from */
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -654,11 +654,21 @@
MRS_FIELD_VALUE_END,
};
+static struct mrs_field_hwcap id_aa64isar1_gpi_caps[] = {
+ MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPI_IMPL),
+ MRS_HWCAP_END
+};
+
static struct mrs_field_value id_aa64isar1_gpa[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static struct mrs_field_hwcap id_aa64isar1_gpa_caps[] = {
+ MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPA_IMPL),
+ MRS_HWCAP_END
+};
+
static struct mrs_field_value id_aa64isar1_lrcpc[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
@@ -699,6 +709,11 @@
MRS_FIELD_VALUE_END,
};
+static struct mrs_field_hwcap id_aa64isar1_api_caps[] = {
+ MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_API_PAC),
+ MRS_HWCAP_END
+};
+
static struct mrs_field_value id_aa64isar1_apa[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_APA_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_APA_PAC, "APA PAC"),
@@ -706,6 +721,11 @@
MRS_FIELD_VALUE_END,
};
+static struct mrs_field_hwcap id_aa64isar1_apa_caps[] = {
+ MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_APA_PAC),
+ MRS_HWCAP_END
+};
+
static struct mrs_field_value id_aa64isar1_dpb[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
@@ -732,16 +752,20 @@
id_aa64isar1_sb_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
id_aa64isar1_frintts, id_aa64isar1_frintts_caps),
- MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
- MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi,
+ id_aa64isar1_gpi_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa,
+ id_aa64isar1_gpa_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER,
id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER,
id_aa64isar1_fcma, id_aa64isar1_fcma_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER,
id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps),
- MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
- MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api,
+ id_aa64isar1_api_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa,
+ id_aa64isar1_apa_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb,
id_aa64isar1_dpb_caps),
MRS_FIELD_END,
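
The GPA/GPI/APA/API entries above export HWCAP_PACA and HWCAP_PACG to userland through the ELF auxiliary vector. As a hedged illustration (not part of this diff), a userspace program could probe for them with elf_aux_info(3); that HWCAP_PACA/HWCAP_PACG are visible via machine/elf.h is an assumption here.

#include <sys/types.h>
#include <sys/auxv.h>
#include <machine/elf.h>	/* assumed to define HWCAP_PACA/HWCAP_PACG */
#include <stdio.h>

int
main(void)
{
	unsigned long hwcap = 0;

	/* Fetch the AT_HWCAP word the kernel fills in from elf_hwcap. */
	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0)
		return (1);
	printf("PACA (address authentication): %s\n",
	    (hwcap & HWCAP_PACA) ? "yes" : "no");
	printf("PACG (generic authentication): %s\n",
	    (hwcap & HWCAP_PACG) ? "yes" : "no");
	return (0);
}
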
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -152,6 +152,16 @@
bl initarm
/* We are done with the boot params */
add sp, sp, #BOOTPARAMS_SIZE
+
+ /*
+ * Enable pointer authentication in the kernel. We set the keys for
+ * thread0 in initarm, so we have to wait until it returns before
+ * enabling it here. If we enabled it inside initarm, any authentication
+ * performed when returning from it would fail, as initarm was entered
+ * with pointer authentication disabled.
+ */
+ bl ptrauth_start
+
bl mi_startup
/* We should not get here */
@@ -239,7 +249,7 @@
ret
1:
/* Configure the Hypervisor */
- mov x2, #(HCR_RW)
+ ldr x2, =(HCR_RW | HCR_APK | HCR_API)
msr hcr_el2, x2
/* Load the Virtualization Process ID Register */
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -374,6 +374,7 @@
thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
thread0.td_frame = &proc0_tf;
+ ptrauth_thread0(&thread0);
pcpup->pc_curpcb = thread0.td_pcb;
/*
@@ -832,6 +833,13 @@
panic("Invalid bus configuration: %s",
kern_getenv("kern.cfg.order"));
+ /*
+ * Check if pointer authentication is available on this system, and
+ * if so enable its use. This needs to be called before init_proc0
+ * as that will configure the thread0 pointer authentication keys.
+ */
+ ptrauth_init();
+
/*
* Dump the boot metadata. We have to wait for cninit() since console
* output is required. If it's grossly incorrect the kernel will never
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -58,6 +58,7 @@
#include <vm/vm_map.h>
#include <machine/machdep.h>
+#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
@@ -208,6 +209,8 @@
pmap_t pmap0;
u_int mpidr;
+ ptrauth_mp_start(cpu);
+
/*
* Verify that the value passed in 'cpu' argument (aka context_id) is
* valid. Some older U-Boot based PSCI implementations are buggy,
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -6668,11 +6668,11 @@
}
/*
- * To eliminate the unused parameter "old", we would have to add an instruction
- * to cpu_switch().
+ * Activate the thread we are switching to.
+ * To simplify the assembly in cpu_throw, return the new thread's pcb.
*/
struct pcb *
-pmap_switch(struct thread *old __unused, struct thread *new)
+pmap_switch(struct thread *new)
{
pcpu_bp_harden bp_harden;
struct pcb *pcb;
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
new file mode 100644
--- /dev/null
+++ b/sys/arm64/arm64/ptrauth.c
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (c) 2021 The FreeBSD Foundation
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This manages pointer authentication. As it needs to enable the use of
+ * pointer authentication and change the keys, we must build this with
+ * pointer authentication disabled.
+ */
+#ifdef __ARM_FEATURE_PAC_DEFAULT
+#error Must be built with pointer authentication disabled
+#endif
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/libkern.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+
+#define SCTLR_PTRAUTH (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)
+
+static bool __read_mostly enable_ptrauth = false;
+
+/* Functions called from assembly. */
+void ptrauth_start(void);
+struct thread *ptrauth_switch(struct thread *);
+void ptrauth_exit_el0(struct thread *);
+void ptrauth_enter_el0(struct thread *);
+
+void
+ptrauth_init(void)
+{
+ uint64_t isar1;
+ int pac_enable;
+
+ /*
+ * Allow the sysadmin to disable pointer authentication globally,
+ * e.g. on broken hardware.
+ */
+ pac_enable = 1;
+ TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
+ if (!pac_enable) {
+ if (boothowto & RB_VERBOSE)
+ printf("Pointer authentication is disabled\n");
+ return;
+ }
+
+ get_kernel_reg(ID_AA64ISAR1_EL1, &isar1);
+
+ /*
+ * This assumes if there is pointer authentication on the boot CPU
+ * it will also be available on any non-boot CPUs. If this is ever
+ * not the case we will have to add a quirk.
+ */
+ if (ID_AA64ISAR1_APA_VAL(isar1) > 0 || ID_AA64ISAR1_API_VAL(isar1) > 0)
+ enable_ptrauth = true;
+}
+
+/* Copy the keys when forking a new process */
+void
+ptrauth_fork(struct thread *new_td, struct thread *orig_td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
+ sizeof(new_td->td_md.md_ptrauth_user));
+}
+
+/* Generate new userspace keys when executing a new process */
+void
+ptrauth_exec(struct thread *td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
+ 0);
+}
+
+/*
+ * Copy the user keys when creating a new userspace thread, at least until
+ * it's clear how the ABI expects the various keys to be assigned.
+ */
+void
+ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
+ sizeof(new_td->td_md.md_ptrauth_user));
+}
+
+/* Generate new kernel keys when allocating a new thread */
+void
+ptrauth_thread_alloc(struct thread *td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
+ 0);
+}
+
+/*
+ * Load the userspace keys. We can't use WRITE_SPECIALREG as we need
+ * to set the architecture extension.
+ */
+#define LOAD_KEY(space, name) \
+__asm __volatile( \
+ ".arch_extension pauth \n" \
+ "msr "#name"keylo_el1, %0 \n" \
+ "msr "#name"keyhi_el1, %1 \n" \
+ ".arch_extension nopauth \n" \
+ :: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo), \
+ "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
+
+void
+ptrauth_thread0(struct thread *td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ /* TODO: Generate a random number here */
+ memset(&td->td_md.md_ptrauth_kern, 0,
+ sizeof(td->td_md.md_ptrauth_kern));
+ LOAD_KEY(kern, apia);
+ /*
+ * No isb as this is called before ptrauth_start so can rely on
+ * the instruction barrier there.
+ */
+}
+
+/*
+ * Enable pointer authentication. After this point userspace and the kernel
+ * can sign return addresses, etc. based on their keys
+ *
+ * This assumes either all or no CPUs have pointer authentication support,
+ * and, if supported, all CPUs have the same algorithm.
+ */
+void
+ptrauth_start(void)
+{
+ uint64_t sctlr;
+
+ if (!enable_ptrauth)
+ return;
+
+ /* Enable pointer authentication */
+ sctlr = READ_SPECIALREG(sctlr_el1);
+ sctlr |= SCTLR_PTRAUTH;
+ WRITE_SPECIALREG(sctlr_el1, sctlr);
+ isb();
+}
+
+#ifdef SMP
+void
+ptrauth_mp_start(uint64_t cpu)
+{
+ struct ptrauth_key start_key;
+ uint64_t sctlr;
+
+ if (!enable_ptrauth)
+ return;
+
+ /*
+ * We need a key until we call sched_throw, however we don't have
+ * a thread until then. Create a key just for use within
+ * init_secondary and whatever it calls. As init_secondary never
+ * returns it is safe to do so from within it.
+ *
+ * As it's only used for a short length of time just use the cpu
+ * as the key.
+ */
+ start_key.pa_key_lo = cpu;
+ start_key.pa_key_hi = ~cpu;
+
+ __asm __volatile(
+ ".arch_extension pauth \n"
+ "msr apiakeylo_el1, %0 \n"
+ "msr apiakeyhi_el1, %1 \n"
+ ".arch_extension nopauth \n"
+ :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
+
+ /* Enable pointer authentication */
+ sctlr = READ_SPECIALREG(sctlr_el1);
+ sctlr |= SCTLR_PTRAUTH;
+ WRITE_SPECIALREG(sctlr_el1, sctlr);
+ isb();
+}
+#endif
+
+struct thread *
+ptrauth_switch(struct thread *td)
+{
+ if (enable_ptrauth) {
+ LOAD_KEY(kern, apia);
+ isb();
+ }
+
+ return (td);
+}
+
+/* Called when we are exiting userspace and entering the kernel */
+void
+ptrauth_exit_el0(struct thread *td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ LOAD_KEY(kern, apia);
+ isb();
+}
+
+/* Called when we are about to exit the kernel and enter userspace */
+void
+ptrauth_enter_el0(struct thread *td)
+{
+ if (!enable_ptrauth)
+ return;
+
+ LOAD_KEY(user, apia);
+ LOAD_KEY(user, apib);
+ LOAD_KEY(user, apda);
+ LOAD_KEY(user, apdb);
+ LOAD_KEY(user, apga);
+ /*
+ * No isb as this is called from the exception handler so can rely
+ * on the eret instruction to be the needed context synchronizing event.
+ */
+}
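
For readability, here is a hedged sketch of what a single LOAD_KEY(kern, apia) invocation above expands to after preprocessing; the only assumption is that td is the struct thread * in scope, as it is in every caller of the macro.

	/* Roughly the code LOAD_KEY(kern, apia) produces. */
	__asm __volatile(
	    ".arch_extension pauth		\n"
	    "msr	apiakeylo_el1, %0	\n"
	    "msr	apiakeyhi_el1, %1	\n"
	    ".arch_extension nopauth		\n"
	    :: "r"(td->td_md.md_ptrauth_kern.apia.pa_key_lo),
	       "r"(td->td_md.md_ptrauth_kern.apia.pa_key_hi));
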
diff --git a/sys/arm64/arm64/stack_machdep.c b/sys/arm64/arm64/stack_machdep.c
--- a/sys/arm64/arm64/stack_machdep.c
+++ b/sys/arm64/arm64/stack_machdep.c
@@ -69,7 +69,7 @@
return (EOPNOTSUPP);
frame.fp = td->td_pcb->pcb_x[29];
- frame.pc = td->td_pcb->pcb_lr;
+ frame.pc = ADDR_MAKE_CANONICAL(td->td_pcb->pcb_lr);
stack_capture(td, st, &frame);
return (0);
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -71,13 +71,16 @@
#ifdef VFP
/* Backup the new thread pointer around a call to C code */
- mov x19, x0
- mov x20, x1
+ mov x19, x1
bl vfp_discard
- mov x1, x20
mov x0, x19
+#else
+ mov x0, x1
#endif
+ /* This returns the thread pointer so no need to save it */
+ bl ptrauth_switch
+ /* This returns the thread pcb */
bl pmap_switch
mov x4, x0
@@ -153,10 +156,14 @@
/* Load the pcb address */
mov x1, x4
bl vfp_save_state
- mov x1, x20
- mov x0, x19
+ mov x0, x20
+#else
+ mov x0, x1
#endif
+ /* This returns the thread pointer so no need to save it */
+ bl ptrauth_switch
+ /* This returns the thread pcb */
bl pmap_switch
/* Move the new pcb out of the way */
mov x4, x0
@@ -213,11 +220,15 @@
bl _C_LABEL(fork_exit)
/*
- * Disable interrupts to avoid
- * overwriting spsr_el1 and sp_el0 by an IRQ exception.
+ * Disable interrupts as we are setting userspace specific
+ * state that we won't handle correctly in an interrupt while
+ * in the kernel.
*/
msr daifset, #(DAIF_D | DAIF_INTR)
+ ldr x0, [x18, #PC_CURTHREAD]
+ bl ptrauth_enter_el0
+
/* Restore sp, lr, elr, and spsr */
ldp x18, lr, [sp, #TF_SP]
ldp x10, x11, [sp, #TF_ELR]
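
As a rough C-level sketch of the incoming-thread sequence the cpu_switch/cpu_throw assembly above now performs (illustration only; the function name is hypothetical): ptrauth_switch() installs the new thread's kernel apia key and hands the thread pointer back so the assembly does not need to preserve it across the call, then pmap_switch() activates the address space and returns the pcb the register restore works from.

/* Hypothetical helper mirroring the assembly's incoming-thread path. */
static struct pcb *
switch_in_sketch(struct thread *newtd)
{
	struct pcb *pcb;

	newtd = ptrauth_switch(newtd);	/* load the new kernel apia key */
	pcb = pmap_switch(newtd);	/* activate its pmap, get its pcb */
	return (pcb);
}
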
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -483,6 +483,12 @@
panic("No debugger in kernel.");
#endif
break;
+ case EXCP_FPAC:
+ /* We can see this if the authentication of a PAC fails in the kernel */
+ print_registers(frame);
+ printf(" far: %16lx\n", READ_SPECIALREG(far_el1));
+ panic("FPAC kernel exception");
+ break;
case EXCP_UNKNOWN:
if (undef_insn(1, frame))
break;
@@ -573,6 +579,11 @@
exception);
userret(td, frame);
break;
+ case EXCP_FPAC:
+ call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
+ exception);
+ userret(td, frame);
+ break;
case EXCP_SP_ALIGN:
call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
exception);
diff --git a/sys/arm64/arm64/unwind.c b/sys/arm64/arm64/unwind.c
--- a/sys/arm64/arm64/unwind.c
+++ b/sys/arm64/arm64/unwind.c
@@ -47,7 +47,7 @@
/* FP to previous frame (X29) */
frame->fp = ((uintptr_t *)fp)[0];
/* LR (X30) */
- frame->pc = ((uintptr_t *)fp)[1] - 4;
+ frame->pc = ADDR_MAKE_CANONICAL(((uintptr_t *)fp)[1] - 4);
return (true);
}
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
--- a/sys/arm64/arm64/vm_machdep.c
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -94,6 +94,8 @@
/* Clear the debug register state. */
bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs));
+ ptrauth_fork(td2, td1);
+
tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
bcopy(td1->td_frame, tf, sizeof(*tf));
tf->tf_x[0] = 0;
@@ -197,6 +199,9 @@
/* Set the new canary */
arc4random_buf(&td->td_md.md_canary, sizeof(td->td_md.md_canary));
#endif
+
+ /* Copy the pointer authentication keys. */
+ ptrauth_copy_thread(td, td0);
}
/*
@@ -259,6 +264,7 @@
td->td_kstack_pages * PAGE_SIZE) - 1;
td->td_frame = (struct trapframe *)STACKALIGN(
(struct trapframe *)td->td_pcb - 1);
+ ptrauth_thread_alloc(td);
}
void
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -225,6 +225,7 @@
#define EXCP_SVC64 0x15 /* SVC trap for AArch64 */
#define EXCP_HVC 0x16 /* HVC trap */
#define EXCP_MSR 0x18 /* MSR/MRS trap */
+#define EXCP_FPAC 0x1c /* Faulting PAC trap */
#define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */
#define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */
#define EXCP_PC_ALIGN 0x22 /* PC alignment fault */
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -171,6 +171,28 @@
void identify_cpu(u_int);
void install_cpu_errata(void);
+/* Pointer Authentication Code (PAC) support */
+void ptrauth_init(void);
+void ptrauth_fork(struct thread *, struct thread *);
+void ptrauth_exec(struct thread *);
+void ptrauth_copy_thread(struct thread *, struct thread *);
+void ptrauth_thread_alloc(struct thread *);
+void ptrauth_thread0(struct thread *);
+#ifdef SMP
+void ptrauth_mp_start(uint64_t);
+#endif
+
/* Functions to read the sanitised view of the special registers */
void update_special_regs(u_int);
bool extract_user_id_field(u_int, u_int, uint8_t *);
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -192,7 +192,7 @@
int pmap_fault(pmap_t, uint64_t, uint64_t);
-struct pcb *pmap_switch(struct thread *, struct thread *);
+struct pcb *pmap_switch(struct thread *);
extern void (*pmap_clean_stage2_tlbi)(void);
extern void (*pmap_invalidate_vpipt_icache)(void);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -34,10 +34,35 @@
#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_
+struct ptrauth_key {
+ uint64_t pa_key_lo;
+ uint64_t pa_key_hi;
+};
+
struct mdthread {
int md_spinlock_count; /* (k) */
register_t md_saved_daif; /* (k) */
uintptr_t md_canary;
+
+ /*
+ * The pointer authentication keys. These are currently shared within a
+ * process, however this may change for some keys, as the PAuth ABI
+ * Extension to ELF for the Arm 64-bit Architecture [1] is, as of July
+ * 2021, still at Alpha quality.
+ *
+ * [1] https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst
+ */
+ struct {
+ struct ptrauth_key apia;
+ struct ptrauth_key apib;
+ struct ptrauth_key apda;
+ struct ptrauth_key apdb;
+ struct ptrauth_key apga;
+ } md_ptrauth_user;
+
+ struct {
+ struct ptrauth_key apia;
+ } md_ptrauth_kern;
};
struct mdproc {
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -162,6 +162,15 @@
#define ADDR_IS_CANONICAL(addr) \
(((addr) & 0xffff000000000000UL) == 0 || \
((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
+#define ADDR_MAKE_CANONICAL(addr) ({ \
+ __typeof(addr) _tmp_addr = (addr); \
+ \
+ _tmp_addr &= ~0xffff000000000000UL; \
+ if (ADDR_IS_KERNEL(addr)) \
+ _tmp_addr |= 0xffff000000000000UL; \
+ \
+ _tmp_addr; \
+})
/* 95 TiB maximum for the direct map region */
#define DMAP_MIN_ADDRESS (0xffffa00000000000UL)
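
A hedged example of the arithmetic ADDR_MAKE_CANONICAL() performs, with made-up values: once return addresses are signed, the PAC sits in the upper bits of the saved LR, so the unwinder changes above canonicalise the value before using it. This assumes ADDR_IS_KERNEL() (not shown in this hunk) keys off bit 55, which the PAC leaves intact.

	uint64_t signed_lr = 0x25b000000083abcdUL;	/* hypothetical PAC'd kernel LR */
	uint64_t pc = ADDR_MAKE_CANONICAL(signed_lr);	/* 0xffff00000083abcdUL again */
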
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -66,6 +66,8 @@
arm64/arm64/mp_machdep.c optional smp
arm64/arm64/nexus.c standard
arm64/arm64/ofw_machdep.c optional fdt
+arm64/arm64/ptrauth.c standard \
+ compile-with "${NORMAL_C:N-mbranch-protection*}"
arm64/arm64/pmap.c standard
arm64/arm64/ptrace_machdep.c standard
arm64/arm64/sigtramp.S standard
