diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
index 3df91d6e3c1e..2c25628f77cc 100644
--- a/sys/arm/arm/exception.S
+++ b/sys/arm/arm/exception.S
@@ -1,373 +1,374 @@
 /*	$NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $	*/
 
 /*-
  * Copyright (c) 1994-1997 Mark Brinicombe.
  * Copyright (c) 1994 Brini.
  * All rights reserved.
  *
  * This code is derived from software written for Brini by Mark Brinicombe
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by Brini.
  * 4. The name of the company nor the name of the author may be used to
  *    endorse or promote products derived from this software without specific
  *    prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * RiscBSD kernel project
  *
  * exception.S
  *
  * Low level handlers for exception vectors
  *
  * Created      : 24/09/94
  *
  * Based on kate/display/abort.s
  */
 
 #include "assym.inc"
 
 #include
 #include
 #include
 #include
+#include
 
 #ifdef KDTRACE_HOOKS
 	.bss
 	.align	4
 	.global	_C_LABEL(dtrace_invop_jump_addr)
 _C_LABEL(dtrace_invop_jump_addr):
 	.word	0
 	.word	0
 #endif
 
 	.text
 	.align	2
 
 /*
  * ASM macros for pushing and pulling trapframes from the stack
  *
  * These macros are used to handle the irqframe and trapframe structures
  * defined above.
  */
 
 /*
  * PUSHFRAME - macro to push a trap frame on the stack in the current mode
  * Since the current mode is used, the SVC lr field is not defined.
  */
 #define	PUSHFRAME							\
 	sub	sp, sp, #4;		/* Align the stack */		\
 	str	lr, [sp, #-4]!;		/* Push the return address */	\
 	sub	sp, sp, #(4*17);	/* Adjust the stack pointer */	\
 	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
 	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	\
 	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
 	mov	r0, r0;			/* NOP for previous instruction */ \
 	mrs	r0, spsr;		/* Put the SPSR on the stack */	\
 	str	r0, [sp, #-4]!;
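For orientation, the 17-word adjustment plus the pushed return address and alignment pad add up to a 20-word frame. The C sketch below is reconstructed from the macro itself rather than copied from the machine frame header, so treat the field names as assumptions:

    #include <stdint.h>

    /* Sketch of the stack frame PUSHFRAME produces, lowest address first
     * (reconstructed from the macro; names are illustrative only). */
    struct trapframe_sketch {
    	uint32_t tf_spsr;	/* str r0, [sp, #-4]!  - saved SPSR */
    	uint32_t tf_r[13];	/* stmia sp, {r0-r12} */
    	uint32_t tf_usr_sp;	/* stmia r0, {r13-r14}^ */
    	uint32_t tf_usr_lr;
    	uint32_t tf_svc_sp;	/* not written by PUSHFRAME; */
    	uint32_t tf_svc_lr;	/* PUSHFRAMEINSVC records these */
    	uint32_t tf_pc;		/* str lr, [sp, #-4]! - return address */
    	uint32_t tf_pad;	/* the initial "sub sp, sp, #4" */
    };

PUSHFRAMEINSVC below builds the same frame but from another exception mode, which is why it can also fill in the SVC-mode sp and lr slots that PUSHFRAME leaves undefined.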
 
 /*
  * PULLFRAME - macro to pull a trap frame from the stack in the current mode
  * Since the current mode is used, the SVC lr field is ignored.
  */
 #define	PULLFRAME							\
 	ldr	r0, [sp], #4;		/* Get the SPSR from stack */	\
 	msr	spsr_fsxc, r0;						\
 	clrex;								\
 	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
 	mov	r0, r0;			/* NOP for previous instruction */ \
 	add	sp, sp, #(4*17);	/* Adjust the stack pointer */	\
 	ldr	lr, [sp], #4;		/* Pull the return address */	\
 	add	sp, sp, #4		/* Align the stack */
 
 /*
  * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
  * This should only be used if the processor is not currently in SVC32
  * mode. The processor mode is switched to SVC mode and the trap frame is
  * stored. The SVC lr field is used to store the previous value of
  * lr in SVC mode.
  */
 #define	PUSHFRAMEINSVC							\
 	stmdb	sp, {r0-r3};		/* Save 4 registers */		\
 	mov	r0, lr;			/* Save xxx32 r14 */		\
 	mov	r1, sp;			/* Save xxx32 sp */		\
 	mrs	r3, spsr;		/* Save xxx32 spsr */		\
 	mrs	r2, cpsr;		/* Get the CPSR */		\
 	bic	r2, r2, #(PSR_MODE);	/* Fix for SVC mode */		\
 	orr	r2, r2, #(PSR_SVC32_MODE);				\
 	msr	cpsr_c, r2;		/* Punch into SVC mode */	\
 	mov	r2, sp;			/* Save SVC sp */		\
 	bic	sp, sp, #7;		/* Align sp to an 8-byte address */ \
 	sub	sp, sp, #(4 * 17);	/* Pad trapframe to keep alignment */ \
 					/* and for dtrace to emulate push/pop */ \
 	str	r0, [sp, #-4]!;		/* Push return address */	\
 	str	lr, [sp, #-4]!;		/* Push SVC lr */		\
 	str	r2, [sp, #-4]!;		/* Push SVC sp */		\
 	msr	spsr_fsxc, r3;		/* Restore correct spsr */	\
 	ldmdb	r1, {r0-r3};		/* Restore 4 regs from xxx mode */ \
 	sub	sp, sp, #(4*15);	/* Adjust the stack pointer */	\
 	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
 	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	\
 	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
 	mov	r0, r0;			/* NOP for previous instruction */ \
 	mrs	r0, spsr;		/* Put the SPSR on the stack */	\
 	str	r0, [sp, #-4]!
 
 /*
  * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
  * in SVC32 mode and restore the saved processor mode and PC.
  * This should be used when the SVC lr register needs to be restored on
  * exit.
  */
 #define	PULLFRAMEFROMSVCANDEXIT						\
 	ldr	r0, [sp], #4;		/* Get the SPSR from stack */	\
 	msr	spsr_fsxc, r0;		/* restore SPSR */		\
 	clrex;								\
 	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
 	mov	r0, r0;			/* NOP for previous instruction */ \
 	add	sp, sp, #(4*15);	/* Adjust the stack pointer */	\
 	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */
 
 /*
  * Unwind hints so we can unwind past functions that use
  * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order.
  * As the last thing we do is restore the stack pointer
  * we can ignore the padding at the end of struct trapframe.
  */
 #define	UNWINDSVCFRAME							\
 	.save	{r13-r15};		/* Restore sp, lr, pc */	\
 	.pad	#(2*4);			/* Skip user sp and lr */	\
 	.save	{r0-r12};		/* Restore r0-r12 */		\
 	.pad	#(4)			/* Skip spsr */
 
 #define	DO_AST								\
 	ldr	r0, [sp];		/* Get the SPSR from stack */	\
 	mrs	r4, cpsr;		/* save CPSR */			\
 	orr	r1, r4, #(PSR_I);					\
 	msr	cpsr_c, r1;		/* Disable interrupts */	\
 	and	r0, r0, #(PSR_MODE);	/* Returning to USR mode? */	\
 	teq	r0, #(PSR_USR32_MODE);					\
 	bne	2f;			/* Nope, get out now */		\
 	bic	r4, r4, #(PSR_I);					\
 1:	GET_CURTHREAD_PTR(r5);						\
 	ldr	r1, [r5, #(TD_AST)];					\
 	teq	r1, #0;							\
 	beq	2f;			/* Nope. Just bail */		\
 	msr	cpsr_c, r4;		/* Restore interrupts */	\
 	mov	r0, sp;							\
 	bl	_C_LABEL(ast);		/* ast(frame) */		\
 	orr	r0, r4, #(PSR_I);					\
 	msr	cpsr_c, r0;						\
 	b	1b;							\
 2:
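DO_AST is the assembly form of a small loop. A C-level sketch follows; only ast(), curthread, and td_ast correspond to real kernel names, the other helpers are stand-ins invented here:

    struct trapframe;
    struct thread { int td_ast; };

    extern struct thread *curthread;
    void ast(struct trapframe *);			/* real MI kernel entry point */
    int  returning_to_usermode(struct trapframe *);	/* stand-in: SPSR mode test */
    void disable_interrupts(void);			/* stand-in for msr cpsr_c */
    void enable_interrupts(void);

    void
    do_ast_sketch(struct trapframe *frame)
    {
    	if (!returning_to_usermode(frame))
    		return;				/* only for returns to USR32 */
    	for (;;) {
    		disable_interrupts();		/* so td_ast cannot change */
    		if (curthread->td_ast == 0)
    			break;			/* leave with IRQs masked */
    		enable_interrupts();
    		ast(frame);			/* signals, reschedule, ... */
    	}
    }

The re-check with interrupts masked matters: an AST posted between ast() returning and the frame being popped would otherwise be missed.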
 
 /*
  * Entry point for a Software Interrupt (SWI).
  *
  * The hardware switches to svc32 mode on a swi, so we're already on the
  * right stack; just build a trapframe and call the handler.
  */
 ASENTRY_NP(swi_entry)
 	PUSHFRAME			/* Build the trapframe on the */
 	mov	r0, sp			/* svc32 stack, pass it to the */
 	bl	_C_LABEL(swi_handler)	/* swi handler. */
 	/*
 	 * The fork_trampoline() code in swtch.S arranges for the MI fork_exit()
 	 * to return to swi_exit here, to return to userland. The net effect is
 	 * that a newly created thread appears to return from a SWI just like
 	 * the parent thread that created it.
 	 */
 ASEENTRY_NP(swi_exit)
 	DO_AST				/* Handle pending signals. */
 	PULLFRAME			/* Deallocate trapframe. */
 	movs	pc, lr			/* Return to userland. */
 	STOP_UNWINDING			/* Don't unwind into user mode. */
 EEND(swi_exit)
 END(swi_entry)
 
 /*
  * Standard exception exit handler.
  *
  * This is used to return from all exceptions except SWI.  It uses DO_AST and
  * PULLFRAMEFROMSVCANDEXIT and can only be called if the exception entry code
  * used PUSHFRAMEINSVC.
  *
  * If the return is to user mode, this uses DO_AST to deliver any pending
  * signals and/or handle TDF_NEEDRESCHED first.
  */
 ASENTRY_NP(exception_exit)
 	DO_AST				/* Handle pending signals. */
 	PULLFRAMEFROMSVCANDEXIT		/* Return. */
 	UNWINDSVCFRAME			/* Special unwinding for exceptions. */
 END(exception_exit)
 
 /*
  * Entry point for a Prefetch Abort exception.
  *
  * The hardware switches to the abort mode stack; we switch to svc32 before
  * calling the handler, then return directly to the original mode/stack
  * on exit (without transitioning back through the abort mode stack).
  */
 ASENTRY_NP(prefetch_abort_entry)
 	sub	lr, lr, #4		/* Adjust the lr. Transition to svc32 */
 	PUSHFRAMEINSVC			/* mode stack, build trapframe there. */
 	adr	lr, exception_exit	/* Return from handler via standard */
 	mov	r0, sp			/* exception exit routine.  Pass the */
 	mov	r1, #1			/* Type flag */
 	b	_C_LABEL(abort_handler)
 END(prefetch_abort_entry)
 
 /*
  * Entry point for a Data Abort exception.
  *
  * The hardware switches to the abort mode stack; we switch to svc32 before
  * calling the handler, then return directly to the original mode/stack
  * on exit (without transitioning back through the abort mode stack).
  */
 ASENTRY_NP(data_abort_entry)
 	sub	lr, lr, #8		/* Adjust the lr. Transition to svc32 */
 	PUSHFRAMEINSVC			/* mode stack, build trapframe there. */
 	adr	lr, exception_exit	/* Exception exit routine */
 	mov	r0, sp			/* Trapframe to the handler */
 	mov	r1, #0			/* Type flag */
 	b	_C_LABEL(abort_handler)
 END(data_abort_entry)
 
 /*
  * Entry point for an Undefined Instruction exception.
  *
  * The hardware switches to the undefined mode stack; we switch to svc32 before
  * calling the handler, then return directly to the original mode/stack
  * on exit (without transitioning back through the undefined mode stack).
  */
 ASENTRY_NP(undefined_entry)
 	PUSHFRAMEINSVC			/* mode stack, build trapframe there. */
 	mov	r4, r0			/* R0 contains SPSR */
 	adr	lr, exception_exit	/* Return from handler via standard */
 	mov	r0, sp			/* exception exit routine. pass frame */
 
 	ldr	r2, [sp, #(TF_PC)]	/* load pc */
 	tst	r4, #(PSR_T)		/* test if PSR_T */
 	subne	r2, r2, #(THUMB_INSN_SIZE)
 	subeq	r2, r2, #(INSN_SIZE)
 	str	r2, [sp, #TF_PC]	/* store pc */
 
 #ifdef KDTRACE_HOOKS
 	/* Check if dtrace is enabled */
 	ldr	r1, =_C_LABEL(dtrace_invop_jump_addr)
 	ldr	r3, [r1]
 	cmp	r3, #0
 	beq	undefinedinstruction
 
 	and	r4, r4, #(PSR_MODE)	/* Mask out unneeded bits */
 	cmp	r4, #(PSR_USR32_MODE)	/* Check if we came from usermode */
 	beq	undefinedinstruction
 
 	ldr	r4, [r2]		/* load instruction */
 	ldr	r1, =FBT_BREAKPOINT	/* load fbt inv op */
 	cmp	r1, r4
 	bne	undefinedinstruction
 
 	bx	r3			/* call invop_jump_addr */
 #endif
 
 	b	undefinedinstruction	/* call standard handler */
 END(undefined_entry)
 
 /*
  * Entry point for a normal IRQ.
  *
  * The hardware switches to the IRQ mode stack; we switch to svc32 before
  * calling the handler, then return directly to the original mode/stack
  * on exit (without transitioning back through the IRQ mode stack).
  */
 ASENTRY_NP(irq_entry)
 	sub	lr, lr, #4		/* Adjust the lr. Transition to svc32 */
 	PUSHFRAMEINSVC			/* mode stack, build trapframe there. */
 	adr	lr, exception_exit	/* Return from handler via standard */
 	mov	r0, sp			/* exception exit routine.  Pass the */
 	mov	r1, #INTR_ROOT_IRQ	/* trapframe and PIC root to the handler. */
 	b	_C_LABEL(intr_irq_handler)
 END(irq_entry)
 
 /*
  * Entry point for an Address Exception.
  * This is an arm26 exception that should never happen.
  */
 ASENTRY_NP(addr_exception_entry)
 	mov	r3, lr
 	mrs	r2, spsr
 	mrs	r1, cpsr
 	adr	r0, Laddr_exception_msg
 	b	_C_LABEL(panic)
 
 Laddr_exception_msg:
 	.asciz	"Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
 	.balign	4
 END(addr_exception_entry)
 
 /*
  * Entry point for the system Reset vector.
  * This should never happen, so panic.
  */
 ASENTRY_NP(reset_entry)
 	mov	r1, lr
 	adr	r0, Lreset_panicmsg
 	b	_C_LABEL(panic)
 	/* NOTREACHED */
 Lreset_panicmsg:
 	.asciz	"Reset vector called, LR = 0x%08x"
 	.balign	4
 END(reset_entry)
 
 /*
  * page0 and page0_data -- An image of the ARM vectors which is copied to
  * the ARM vectors page (high or low) as part of CPU initialization.  The
  * code that does the copy assumes that page0_data holds one 32-bit word
  * of data for each of the predefined ARM vectors.  It also assumes that
  * page0_data follows the vectors in page0, but other stuff can appear
  * between the two.  We currently leave room between the two for some fiq
  * handler code to be copied in.
  */
 	.global	_C_LABEL(page0), _C_LABEL(page0_data)
 
 _C_LABEL(page0):
 	ldr	pc, .Lreset_entry
 	ldr	pc, .Lundefined_entry
 	ldr	pc, .Lswi_entry
 	ldr	pc, .Lprefetch_abort_entry
 	ldr	pc, .Ldata_abort_entry
 	ldr	pc, .Laddr_exception_entry
 	ldr	pc, .Lirq_entry
 
 _C_LABEL(page0_data):
 .Lreset_entry:
 	.word	reset_entry
 .Lundefined_entry:
 	.word	undefined_entry
 .Lswi_entry:
 	.word	swi_entry
 .Lprefetch_abort_entry:
 	.word	prefetch_abort_entry
 .Ldata_abort_entry:
 	.word	data_abort_entry
 .Laddr_exception_entry:
 	.word	addr_exception_entry
 .Lirq_entry:
 	.word	irq_entry
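The genassym.c changes that follow hinge on the ASSYM() mechanism: constants that assembly needs are emitted into the generated assym.inc that exception.S includes. FreeBSD extracts the values from the compiled genassym object rather than by running a program, but the net effect is roughly as if each ASSYM() printed a #define. A minimal, runnable sketch with an invented structure:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for struct thread; only the offset matters here. */
    struct thread_sketch {
    	long	td_spare[2];
    	int	td_ast;
    };

    int
    main(void)
    {
    	/* Equivalent of ASSYM(TD_AST, offsetof(struct thread, td_ast)):
    	 * emit a #define the assembler consumes via #include "assym.inc". */
    	printf("#define\tTD_AST\t%#zx\n",
    	    offsetof(struct thread_sketch, td_ast));
    	return (0);
    }

This is why removing ASSYM(INTR_ROOT_IRQ, INTR_ROOT_IRQ) below is safe once the constant is available from a header the assembly can include directly.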
diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c
index 9d6232739022..24c470bdde86 100644
--- a/sys/arm/arm/genassym.c
+++ b/sys/arm/arm/genassym.c
@@ -1,143 +1,139 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright (c) 2004 Olivier Houchard
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include
-#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include		/* For KERNVIRTADDR */
 
 #include
 #include
 #include
 #include
 #include
 
 ASSYM(KERNBASE, KERNBASE);
 ASSYM(KERNVIRTADDR, KERNVIRTADDR);
 ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL);
 
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
 ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
 ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4));
 ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5));
 ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6));
 ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7));
 ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8));
 ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9));
 ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10));
 ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11));
 ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12));
 ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp));
 ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr));
 ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc));
 ASSYM(PCB_TPIDRURW, offsetof(struct pcb, pcb_regs.sf_tpidrurw));
 
 ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
 ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
 ASSYM(M_LEN, offsetof(struct mbuf, m_len));
 ASSYM(M_DATA, offsetof(struct mbuf, m_data));
 ASSYM(M_NEXT, offsetof(struct mbuf, m_next));
 ASSYM(IP_SRC, offsetof(struct ip, ip_src));
 ASSYM(IP_DST, offsetof(struct ip, ip_dst));
 
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_AST, offsetof(struct thread, td_ast));
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
 ASSYM(TD_MD, offsetof(struct thread, td_md));
 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
 
 ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr));
 ASSYM(TF_R0, offsetof(struct trapframe, tf_r0));
 ASSYM(TF_R1, offsetof(struct trapframe, tf_r1));
 ASSYM(TF_PC, offsetof(struct trapframe, tf_pc));
 ASSYM(P_PID, offsetof(struct proc, p_pid));
 ASSYM(P_FLAG, offsetof(struct proc, p_flag));
 
 ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));
 
 #ifdef VFP
 ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate));
 #endif
 
 ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
 
 ASSYM(PC_BP_HARDEN_KIND, offsetof(struct pcpu, pc_bp_harden_kind));
 ASSYM(PCPU_BP_HARDEN_KIND_NONE, PCPU_BP_HARDEN_KIND_NONE);
 ASSYM(PCPU_BP_HARDEN_KIND_BPIALL, PCPU_BP_HARDEN_KIND_BPIALL);
 ASSYM(PCPU_BP_HARDEN_KIND_ICIALLU, PCPU_BP_HARDEN_KIND_ICIALLU);
 
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 
 #ifdef PMAP_INCLUDE_PTE_SYNC
 ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
 #endif
 
 ASSYM(MAXCOMLEN, MAXCOMLEN);
 ASSYM(MAXCPU, MAXCPU);
 ASSYM(_NCPUWORDS, _NCPUWORDS);
 ASSYM(PCPU_SIZE, sizeof(struct pcpu));
 ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
 ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
 ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
 ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
 
 ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size));
 ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask));
 ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size));
 ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask));
-ASSYM(INTR_ROOT_IRQ, INTR_ROOT_IRQ);
-
 /*
  * Emit the LOCORE_MAP_MB option as a #define only if the option was set.
  */
 #include "opt_locore.h"
 #ifdef LOCORE_MAP_MB
 ASSYM(LOCORE_MAP_MB, LOCORE_MAP_MB);
 #endif
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index d8c024d01921..13095def8b00 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -1,357 +1,358 @@
 /*-
  * Copyright (c) 2014 Andrew Turner
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include
 #include
 #include
 
 #include "assym.inc"
+#include
 
 	.text
 
 /*
  * This is limited to 28 instructions as it's placed in the exception vector
  * slot that is 32 instructions long. We need one for the branch, and three
  * for the prologue.
  */
 .macro	save_registers_head el
 .if \el == 1
 	mov	x18, sp
 	stp	x0, x1, [sp, #(TF_X - TF_SIZE - 128)]!
 .else
 	stp	x0, x1, [sp, #(TF_X - TF_SIZE)]!
 .endif
 	stp	x2, x3, [sp, #(2 * 8)]
 	stp	x4, x5, [sp, #(4 * 8)]
 	stp	x6, x7, [sp, #(6 * 8)]
 	stp	x8, x9, [sp, #(8 * 8)]
 	stp	x10, x11, [sp, #(10 * 8)]
 	stp	x12, x13, [sp, #(12 * 8)]
 	stp	x14, x15, [sp, #(14 * 8)]
 	stp	x16, x17, [sp, #(16 * 8)]
 	stp	x18, x19, [sp, #(18 * 8)]
 	stp	x20, x21, [sp, #(20 * 8)]
 	stp	x22, x23, [sp, #(22 * 8)]
 	stp	x24, x25, [sp, #(24 * 8)]
 	stp	x26, x27, [sp, #(26 * 8)]
 	stp	x28, x29, [sp, #(28 * 8)]
 .if \el == 0
 	mrs	x18, sp_el0
 .endif
 	mrs	x10, elr_el1
 	mrs	x11, spsr_el1
 	mrs	x12, esr_el1
 	mrs	x13, far_el1
 	stp	x18, lr, [sp, #(TF_SP - TF_X)]!
 	stp	x10, x11, [sp, #(TF_ELR)]
 	stp	x12, x13, [sp, #(TF_ESR)]
 	mrs	x18, tpidr_el1
 .endm
 
 .macro	save_registers el
 	add	x29, sp, #(TF_SIZE)
 .if \el == 0
 #if defined(PERTHREAD_SSP)
 	/* Load the SSP canary to sp_el0 */
 	ldr	x1, [x18, #(PC_CURTHREAD)]
 	add	x1, x1, #(TD_MD_CANARY)
 	msr	sp_el0, x1
 #endif
 	/* Apply the SSBD (CVE-2018-3639) workaround if needed */
 	ldr	x1, [x18, #PC_SSBD]
 	cbz	x1, 1f
 	mov	w0, #1
 	blr	x1
 1:
 	ldr	x0, [x18, #PC_CURTHREAD]
 	bl	ptrauth_exit_el0
 
 	ldr	x0, [x18, #(PC_CURTHREAD)]
 	bl	dbg_monitor_enter
 
 	/* Unmask debug and SError exceptions */
 	msr	daifclr, #(DAIF_D | DAIF_A)
 .else
 	/*
 	 * Unmask debug and SError exceptions.
 	 * For EL1, debug exceptions are conditionally unmasked in
 	 * do_el1h_sync().
 	 */
 	msr	daifclr, #(DAIF_A)
 .endif
 .endm
 
 .macro	restore_registers el
 	/*
 	 * Mask all exceptions, x18 may change in the interrupt exception
 	 * handler.
 	 */
 	msr	daifset, #(DAIF_ALL)
 .if \el == 0
 	ldr	x0, [x18, #PC_CURTHREAD]
 	mov	x1, sp
 	bl	dbg_monitor_exit
 
 	ldr	x0, [x18, #PC_CURTHREAD]
 	bl	ptrauth_enter_el0
 
 	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
 	ldr	x1, [x18, #PC_SSBD]
 	cbz	x1, 1f
 	mov	w0, #0
 	blr	x1
 1:
 .endif
 	ldp	x18, lr, [sp, #(TF_SP)]
 	ldp	x10, x11, [sp, #(TF_ELR)]
 .if \el == 0
 	msr	sp_el0, x18
 .endif
 	msr	spsr_el1, x11
 	msr	elr_el1, x10
 	ldp	x0, x1, [sp, #(TF_X + 0 * 8)]
 	ldp	x2, x3, [sp, #(TF_X + 2 * 8)]
 	ldp	x4, x5, [sp, #(TF_X + 4 * 8)]
 	ldp	x6, x7, [sp, #(TF_X + 6 * 8)]
 	ldp	x8, x9, [sp, #(TF_X + 8 * 8)]
 	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
 	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
 	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
 	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
 .if \el == 0
 	/*
 	 * We only restore the callee saved registers when returning to
 	 * userland as they may have been updated by a system call or signal.
 	 */
 	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
 	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
 	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
 	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
 	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
 	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
 .else
 	ldr	x29, [sp, #(TF_X + 29 * 8)]
 .endif
 .if \el == 0
 	add	sp, sp, #(TF_SIZE)
 .else
 	mov	sp, x18
 	mrs	x18, tpidr_el1
 .endif
 .endm
 
 .macro	do_ast
 	mrs	x19, daif
 	/* Make sure the IRQs are enabled before calling ast() */
 	bic	x19, x19, #(PSR_I | PSR_F)
 1:
 	/*
 	 * Mask interrupts while checking the ast pending flag
 	 */
 	msr	daifset, #(DAIF_INTR)
 
 	/* Read the current thread AST mask */
 	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
 	ldr	w1, [x1, #(TD_AST)]
 
 	/* Check if we have a non-zero AST mask */
 	cbz	w1, 2f
 
 	/* Restore interrupts */
 	msr	daif, x19
 
 	/* handle the ast */
 	mov	x0, sp
 	bl	_C_LABEL(ast)
 
 	/* Re-check for new ast scheduled */
 	b	1b
 2:
 .endm
 
 #ifdef KMSAN
 /*
  * The KMSAN runtime relies on a TLS block to track initialization and origin
  * state for function parameters and return values. To keep this state
  * consistent in the face of asynchronous kernel-mode traps, the runtime
  * maintains a stack of blocks: when handling an exception or interrupt,
  * kmsan_intr_enter() pushes the new block to be used until the handler is
  * complete, at which point kmsan_intr_leave() restores the previous block.
  *
  * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
  * may have happened while in kernel-mode. In particular, they are not required
  * around amd64_syscall() or ast() calls. Otherwise, kmsan_intr_enter() can be
  * called unconditionally, without distinguishing between entry from user-mode
  * or kernel-mode.
  */
#define	KMSAN_ENTER	bl kmsan_intr_enter
#define	KMSAN_LEAVE	bl kmsan_intr_leave
#else
#define	KMSAN_ENTER
#define	KMSAN_LEAVE
#endif
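The shape of the bracketing the KMSAN comment describes, as an illustrative C sketch only; kmsan_intr_enter()/kmsan_intr_leave() are the real runtime hooks, while handle_event() is a stand-in for the interrupt or exception work:

    struct trapframe;
    void kmsan_intr_enter(void);
    void kmsan_intr_leave(void);
    void handle_event(struct trapframe *);	/* stand-in */

    void
    async_event_sketch(struct trapframe *frame)
    {
    	kmsan_intr_enter();	/* push a fresh parameter/return TLS block */
    	handle_event(frame);	/* interrupted code's block stays intact */
    	kmsan_intr_leave();	/* pop back to the interrupted block */
    }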
 
 ENTRY(handle_el1h_sync)
 	save_registers 1
 	KMSAN_ENTER
 	ldr	x0, [x18, #PC_CURTHREAD]
 	mov	x1, sp
 	bl	do_el1h_sync
 	KMSAN_LEAVE
 	restore_registers 1
 	ERET
 END(handle_el1h_sync)
 
 ENTRY(handle_el1h_irq)
 	save_registers 1
 	KMSAN_ENTER
 	mov	x0, sp
 	mov	x1, #INTR_ROOT_IRQ
 	bl	intr_irq_handler
 	KMSAN_LEAVE
 	restore_registers 1
 	ERET
 END(handle_el1h_irq)
 
 ENTRY(handle_el1h_fiq)
 	save_registers 1
 	KMSAN_ENTER
 	mov	x0, sp
 	mov	x1, #INTR_ROOT_FIQ
 	bl	intr_irq_handler
 	KMSAN_LEAVE
 	restore_registers 1
 	ERET
 END(handle_el1h_fiq)
 
 ENTRY(handle_el1h_serror)
 	save_registers 1
 	KMSAN_ENTER
 	mov	x0, sp
 1:	bl	do_serror
 	b	1b
 	KMSAN_LEAVE
 END(handle_el1h_serror)
 
 ENTRY(handle_el0_sync)
 	save_registers 0
 	KMSAN_ENTER
 	ldr	x0, [x18, #PC_CURTHREAD]
 	mov	x1, sp
 	mov	x22, x0
 	str	x1, [x0, #TD_FRAME]
 	bl	do_el0_sync
 	do_ast
 	str	xzr, [x22, #TD_FRAME]
 	KMSAN_LEAVE
 	restore_registers 0
 	ERET
 END(handle_el0_sync)
 
 ENTRY(handle_el0_irq)
 	save_registers 0
 	KMSAN_ENTER
 	mov	x0, sp
 	mov	x1, #INTR_ROOT_IRQ
 	bl	intr_irq_handler
 	do_ast
 	KMSAN_LEAVE
 	restore_registers 0
 	ERET
 END(handle_el0_irq)
 
 ENTRY(handle_el0_fiq)
 	save_registers 0
 	KMSAN_ENTER
 	mov	x0, sp
 	mov	x1, #INTR_ROOT_FIQ
 	bl	intr_irq_handler
 	do_ast
 	KMSAN_LEAVE
 	restore_registers 0
 	ERET
 END(handle_el0_fiq)
 
 ENTRY(handle_el0_serror)
 	save_registers 0
 	KMSAN_ENTER
 	mov	x0, sp
 1:	bl	do_serror
 	b	1b
 	KMSAN_LEAVE
 END(handle_el0_serror)
 
 ENTRY(handle_empty_exception)
 	save_registers 0
 	KMSAN_ENTER
 	mov	x0, sp
 1:	bl	unhandled_exception
 	b	1b
 	KMSAN_LEAVE
 END(handle_empty_exception)
 
 .macro	vector	name, el
 	.align 7
 	save_registers_head \el
 	b	handle_\name
 	dsb	sy
 	isb
 	/* Break instruction to ensure we aren't executing code here. */
 	brk	0x42
 .endm
 
 .macro	vempty el
 	vector	empty_exception \el
 .endm
 
 	.align 11
 	.globl exception_vectors
 exception_vectors:
 	vempty 1		/* Synchronous EL1t */
 	vempty 1		/* IRQ EL1t */
 	vempty 1		/* FIQ EL1t */
 	vempty 1		/* Error EL1t */
 
 	vector el1h_sync 1	/* Synchronous EL1h */
 	vector el1h_irq 1	/* IRQ EL1h */
 	vector el1h_fiq 1	/* FIQ EL1h */
 	vector el1h_serror 1	/* Error EL1h */
 
 	vector el0_sync 0	/* Synchronous 64-bit EL0 */
 	vector el0_irq 0	/* IRQ 64-bit EL0 */
 	vector el0_fiq 0	/* FIQ 64-bit EL0 */
 	vector el0_serror 0	/* Error 64-bit EL0 */
 
 	vector el0_sync 0	/* Synchronous 32-bit EL0 */
 	vector el0_irq 0	/* IRQ 32-bit EL0 */
 	vector el0_fiq 0	/* FIQ 32-bit EL0 */
 	vector el0_serror 0	/* Error 32-bit EL0 */
 
 GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
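The 28-instruction budget mentioned above save_registers_head follows from this vector layout: `.align 7` makes each vector slot 2^7 = 128 bytes, which is 32 four-byte A64 instructions; one goes to the `b handle_\name` branch and three are reserved for the prologue, leaving 32 - 1 - 3 = 28 for the macro body. The trailing dsb/isb/brk sequence only occupies slack in slots whose head sequence is shorter.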
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
index 3ca712ca3de3..e3977798b046 100644
--- a/sys/arm64/arm64/genassym.c
+++ b/sys/arm64/arm64/genassym.c
@@ -1,90 +1,85 @@
 /*-
  * Copyright (c) 2004 Olivier Houchard
  * Copyright (c) 2014 Andrew Turner
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include
 #include
-#include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
 
 /* Sizeof arm64_bootparams, rounded to keep stack alignment */
 ASSYM(BOOTPARAMS_SIZE, roundup2(sizeof(struct arm64_bootparams),
     STACKALIGNBYTES + 1));
 ASSYM(BP_MODULEP, offsetof(struct arm64_bootparams, modulep));
 ASSYM(BP_KERN_STACK, offsetof(struct arm64_bootparams, kern_stack));
 ASSYM(BP_KERN_TTBR0, offsetof(struct arm64_bootparams, kern_ttbr0));
 ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el));
 
 ASSYM(EC_EFI_STATUS, offsetof(struct efirt_callinfo, ec_efi_status));
 ASSYM(EC_FPTR, offsetof(struct efirt_callinfo, ec_fptr));
 ASSYM(EC_ARG1, offsetof(struct efirt_callinfo, ec_arg1));
 
 ASSYM(PCPU_SIZE, sizeof(struct pcpu));
 ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
 ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
 ASSYM(PC_SSBD, offsetof(struct pcpu, pc_ssbd));
 
 /* Size of pcb, rounded to keep stack alignment */
 ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1));
 ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT);
 ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x));
 ASSYM(PCB_X19, PCB_X19);
 ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
 ASSYM(PCB_TPIDRRO, offsetof(struct pcb, pcb_tpidrro_el0));
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
 ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
 
 ASSYM(P_PID, offsetof(struct proc, p_pid));
 
 ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
 
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_AST, offsetof(struct thread, td_ast));
 ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
 ASSYM(TD_MD_CANARY, offsetof(struct thread, td_md.md_canary));
 ASSYM(TD_MD_EFIRT_TMP, offsetof(struct thread, td_md.md_efirt_tmp));
 
 ASSYM(TF_SIZE, sizeof(struct trapframe));
 ASSYM(TF_SP, offsetof(struct trapframe, tf_sp));
 ASSYM(TF_LR, offsetof(struct trapframe, tf_lr));
 ASSYM(TF_ELR, offsetof(struct trapframe, tf_elr));
 ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr));
 ASSYM(TF_ESR, offsetof(struct trapframe, tf_esr));
 ASSYM(TF_X, offsetof(struct trapframe, tf_x));
-
-ASSYM(INTR_ROOT_IRQ, INTR_ROOT_IRQ);
-ASSYM(INTR_ROOT_FIQ, INTR_ROOT_FIQ);
diff --git a/sys/riscv/riscv/genassym.c b/sys/riscv/riscv/genassym.c
index 74b70858edab..77966913fd1b 100644
--- a/sys/riscv/riscv/genassym.c
+++ b/sys/riscv/riscv/genassym.c
@@ -1,140 +1,138 @@
 /*-
  * Copyright (c) 2015-2016 Ruslan Bukin
  * All rights reserved.
  *
  * Portions of this software were developed by SRI International and the
  * University of Cambridge Computer Laboratory under DARPA/AFRL contract
  * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
  *
  * Portions of this software were developed by the University of Cambridge
  * Computer Laboratory as part of the CTSRD Project, with support from the
  * UK Higher Education Innovation Fund (HEIF).
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 
 ASSYM(KERNBASE, KERNBASE);
 ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
 ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
 ASSYM(PMAP_MAPDEV_EARLY_SIZE, PMAP_MAPDEV_EARLY_SIZE);
 
 ASSYM(PM_SATP, offsetof(struct pmap, pm_satp));
 
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
 ASSYM(PCB_SIZE, sizeof(struct pcb));
 ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra));
 ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
 ASSYM(PCB_GP, offsetof(struct pcb, pcb_gp));
 ASSYM(PCB_TP, offsetof(struct pcb, pcb_tp));
 ASSYM(PCB_S, offsetof(struct pcb, pcb_s));
 ASSYM(PCB_X, offsetof(struct pcb, pcb_x));
 ASSYM(PCB_FCSR, offsetof(struct pcb, pcb_fcsr));
 
 ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
 
 ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
 ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
 
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_AST, offsetof(struct thread, td_ast));
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
 ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
 ASSYM(TD_MD, offsetof(struct thread, td_md));
 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
 
 ASSYM(TF_SIZE, roundup2(sizeof(struct trapframe), STACKALIGNBYTES + 1));
 ASSYM(TF_RA, offsetof(struct trapframe, tf_ra));
 ASSYM(TF_SP, offsetof(struct trapframe, tf_sp));
 ASSYM(TF_GP, offsetof(struct trapframe, tf_gp));
 ASSYM(TF_TP, offsetof(struct trapframe, tf_tp));
 ASSYM(TF_T, offsetof(struct trapframe, tf_t));
 ASSYM(TF_S, offsetof(struct trapframe, tf_s));
 ASSYM(TF_A, offsetof(struct trapframe, tf_a));
 ASSYM(TF_SEPC, offsetof(struct trapframe, tf_sepc));
 ASSYM(TF_STVAL, offsetof(struct trapframe, tf_stval));
 ASSYM(TF_SCAUSE, offsetof(struct trapframe, tf_scause));
 ASSYM(TF_SSTATUS, offsetof(struct trapframe, tf_sstatus));
 
 ASSYM(HYP_H_RA, offsetof(struct hypctx, host_regs.hyp_ra));
 ASSYM(HYP_H_SP, offsetof(struct hypctx, host_regs.hyp_sp));
 ASSYM(HYP_H_GP, offsetof(struct hypctx, host_regs.hyp_gp));
 ASSYM(HYP_H_TP, offsetof(struct hypctx, host_regs.hyp_tp));
 ASSYM(HYP_H_T, offsetof(struct hypctx, host_regs.hyp_t));
 ASSYM(HYP_H_S, offsetof(struct hypctx, host_regs.hyp_s));
 ASSYM(HYP_H_A, offsetof(struct hypctx, host_regs.hyp_a));
 ASSYM(HYP_H_SEPC, offsetof(struct hypctx, host_regs.hyp_sepc));
 ASSYM(HYP_H_SSTATUS, offsetof(struct hypctx, host_regs.hyp_sstatus));
 ASSYM(HYP_H_HSTATUS, offsetof(struct hypctx, host_regs.hyp_hstatus));
 ASSYM(HYP_H_SSCRATCH, offsetof(struct hypctx, host_sscratch));
 ASSYM(HYP_H_STVEC, offsetof(struct hypctx, host_stvec));
 ASSYM(HYP_H_SCOUNTEREN, offsetof(struct hypctx, host_scounteren));
 
 ASSYM(HYP_G_RA, offsetof(struct hypctx, guest_regs.hyp_ra));
 ASSYM(HYP_G_SP, offsetof(struct hypctx, guest_regs.hyp_sp));
 ASSYM(HYP_G_GP, offsetof(struct hypctx, guest_regs.hyp_gp));
 ASSYM(HYP_G_TP, offsetof(struct hypctx, guest_regs.hyp_tp));
 ASSYM(HYP_G_T, offsetof(struct hypctx, guest_regs.hyp_t));
 ASSYM(HYP_G_S, offsetof(struct hypctx, guest_regs.hyp_s));
 ASSYM(HYP_G_A, offsetof(struct hypctx, guest_regs.hyp_a));
 ASSYM(HYP_G_SEPC, offsetof(struct hypctx, guest_regs.hyp_sepc));
 ASSYM(HYP_G_SSTATUS, offsetof(struct hypctx, guest_regs.hyp_sstatus));
 ASSYM(HYP_G_HSTATUS, offsetof(struct hypctx, guest_regs.hyp_hstatus));
 ASSYM(HYP_G_SCOUNTEREN, offsetof(struct hypctx, guest_scounteren));
 
 ASSYM(HYP_TRAP_SEPC, offsetof(struct hyptrap, sepc));
 ASSYM(HYP_TRAP_SCAUSE, offsetof(struct hyptrap, scause));
 ASSYM(HYP_TRAP_STVAL, offsetof(struct hyptrap, stval));
 ASSYM(HYP_TRAP_HTVAL, offsetof(struct hyptrap, htval));
 ASSYM(HYP_TRAP_HTINST, offsetof(struct hyptrap, htinst));
 
 ASSYM(RISCV_BOOTPARAMS_SIZE, sizeof(struct riscv_bootparams));
 ASSYM(RISCV_BOOTPARAMS_KERN_PHYS, offsetof(struct riscv_bootparams,
     kern_phys));
 ASSYM(RISCV_BOOTPARAMS_KERN_STACK, offsetof(struct riscv_bootparams,
     kern_stack));
 ASSYM(RISCV_BOOTPARAMS_DTBP_PHYS, offsetof(struct riscv_bootparams,
     dtbp_phys));
 ASSYM(RISCV_BOOTPARAMS_MODULEP, offsetof(struct riscv_bootparams, modulep));
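The pattern behind the whole change: INTR_ROOT_IRQ and INTR_ROOT_FIQ are plain preprocessor constants, so they can live in a header that both C and assembly include, while genassym remains necessary only for values the C compiler must compute. A hedged sketch of the distinction (the EXAMPLE_ name is invented for illustration):

    /* A plain constant works in a shared header, for C and .S alike. */
    #define	EXAMPLE_ROOT_IRQ	0

    /* By contrast, something like
     *	ASSYM(TD_AST, offsetof(struct thread, td_ast));
     * must go through genassym, because the assembler cannot
     * evaluate offsetof() or sizeof(). */

With the constants in a header, the handlers can write `mov x1, #INTR_ROOT_IRQ` (or `mov r1, #INTR_ROOT_IRQ` on arm) without any assym.inc entry, which is exactly what the exception.S hunks above rely on.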