diff --git a/sys/arm64/arm64/db_trace.c b/sys/arm64/arm64/db_trace.c index ee20e9f14f0a..82b6e07fc261 100644 --- a/sys/arm64/arm64/db_trace.c +++ b/sys/arm64/arm64/db_trace.c @@ -1,133 +1,133 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include void db_md_list_watchpoints() { dbg_show_watchpoint(); } int db_md_clr_watchpoint(db_expr_t addr, db_expr_t size) { return (dbg_remove_watchpoint(addr, size, DBG_FROM_EL1)); } int db_md_set_watchpoint(db_expr_t addr, db_expr_t size) { return (dbg_setup_watchpoint(addr, size, DBG_FROM_EL1, HW_BREAKPOINT_RW)); } static void db_stack_trace_cmd(struct unwind_state *frame) { c_db_sym_t sym; const char *name; db_expr_t value; db_expr_t offset; while (1) { uint64_t pc = frame->pc; int ret; ret = unwind_frame(frame); if (ret < 0) break; sym = db_search_symbol(pc, DB_STGY_ANY, &offset); if (sym == C_DB_SYM_NULL) { value = 0; name = "(null)"; } else db_symbol_values(sym, &name, &value); db_printf("%s() at ", name); db_printsym(frame->pc, DB_STGY_PROC); db_printf("\n"); db_printf("\t pc = 0x%016lx lr = 0x%016lx\n", pc, frame->pc); db_printf("\t sp = 0x%016lx fp = 0x%016lx\n", frame->sp, frame->fp); /* TODO: Show some more registers */ db_printf("\n"); } } int db_trace_thread(struct thread *thr, int count) { struct unwind_state frame; struct pcb *ctx; if (thr != curthread) { ctx = kdb_thr_ctx(thr); frame.sp = (uint64_t)ctx->pcb_sp; frame.fp = (uint64_t)ctx->pcb_x[29]; - frame.pc = (uint64_t)ctx->pcb_x[30]; + frame.pc = (uintptr_t)ctx->pcb_lr; db_stack_trace_cmd(&frame); } else db_trace_self(); return (0); } void db_trace_self(void) { struct unwind_state frame; uint64_t sp; __asm __volatile("mov %0, sp" : "=&r" (sp)); frame.sp = sp; frame.fp = (uint64_t)__builtin_frame_address(0); frame.pc = (uint64_t)db_trace_self; db_stack_trace_cmd(&frame); } diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S index d3242f4ac0b7..19dd9e9d85e2 100644 --- a/sys/arm64/arm64/exception.S +++ b/sys/arm64/arm64/exception.S @@ -1,246 +1,246 @@ /*- * Copyright (c) 2014 Andrew Turner * 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include __FBSDID("$FreeBSD$"); #include "assym.inc" .text .macro save_registers el .if \el == 1 mov x18, sp sub sp, sp, #128 .endif sub sp, sp, #(TF_SIZE + 16) - stp x29, x30, [sp, #(TF_SIZE)] + stp x29, lr, [sp, #(TF_SIZE)] stp x28, x29, [sp, #(TF_X + 28 * 8)] stp x26, x27, [sp, #(TF_X + 26 * 8)] stp x24, x25, [sp, #(TF_X + 24 * 8)] stp x22, x23, [sp, #(TF_X + 22 * 8)] stp x20, x21, [sp, #(TF_X + 20 * 8)] stp x18, x19, [sp, #(TF_X + 18 * 8)] stp x16, x17, [sp, #(TF_X + 16 * 8)] stp x14, x15, [sp, #(TF_X + 14 * 8)] stp x12, x13, [sp, #(TF_X + 12 * 8)] stp x10, x11, [sp, #(TF_X + 10 * 8)] stp x8, x9, [sp, #(TF_X + 8 * 8)] stp x6, x7, [sp, #(TF_X + 6 * 8)] stp x4, x5, [sp, #(TF_X + 4 * 8)] stp x2, x3, [sp, #(TF_X + 2 * 8)] stp x0, x1, [sp, #(TF_X + 0 * 8)] mrs x10, elr_el1 mrs x11, spsr_el1 mrs x12, esr_el1 .if \el == 0 mrs x18, sp_el0 .endif str x10, [sp, #(TF_ELR)] stp w11, w12, [sp, #(TF_SPSR)] stp x18, lr, [sp, #(TF_SP)] mrs x18, tpidr_el1 add x29, sp, #(TF_SIZE) .if \el == 0 /* Apply the SSBD (CVE-2018-3639) workaround if needed */ ldr x1, [x18, #PC_SSBD] cbz x1, 1f mov w0, #1 blr x1 1: .endif .endm .macro restore_registers el .if \el == 1 msr daifset, #2 /* * Disable interrupts, x18 may change in the interrupt exception * handler. For EL0 exceptions, do_ast already did this. */ .endif .if \el == 0 /* Remove the SSBD (CVE-2018-3639) workaround if needed */ ldr x1, [x18, #PC_SSBD] cbz x1, 1f mov w0, #0 blr x1 1: .endif ldp x18, lr, [sp, #(TF_SP)] ldp x10, x11, [sp, #(TF_ELR)] .if \el == 0 msr sp_el0, x18 .endif msr spsr_el1, x11 msr elr_el1, x10 ldp x0, x1, [sp, #(TF_X + 0 * 8)] ldp x2, x3, [sp, #(TF_X + 2 * 8)] ldp x4, x5, [sp, #(TF_X + 4 * 8)] ldp x6, x7, [sp, #(TF_X + 6 * 8)] ldp x8, x9, [sp, #(TF_X + 8 * 8)] ldp x10, x11, [sp, #(TF_X + 10 * 8)] ldp x12, x13, [sp, #(TF_X + 12 * 8)] ldp x14, x15, [sp, #(TF_X + 14 * 8)] ldp x16, x17, [sp, #(TF_X + 16 * 8)] .if \el == 0 /* * We only restore the callee saved registers when returning to * userland as they may have been updated by a system call or signal. 
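
As an aside for readers of the save_registers/restore_registers macros above: the TF_* constants they use are offsets into the kernel's struct trapframe, exported by genassym.c later in this diff. The field order can be read off the stores; the sketch below is an inference from those offsets, not a quotation of machine/frame.h, so treat it as an approximation.

#include <stdint.h>

/* Sketch only: layout inferred from the stp/ldp offsets used above. */
struct trapframe_sketch {
	uint64_t tf_sp;		/* TF_SP: "stp x18, lr" writes sp and lr as a pair */
	uint64_t tf_lr;
	uint64_t tf_elr;	/* TF_ELR: exception return address */
	uint32_t tf_spsr;	/* TF_SPSR: "stp w11, w12" writes spsr... */
	uint32_t tf_esr;	/* ...and esr as two 32-bit words */
	uint64_t tf_x[30];	/* TF_X: x0-x29; x30 lives in tf_lr */
};
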
*/ ldp x18, x19, [sp, #(TF_X + 18 * 8)] ldp x20, x21, [sp, #(TF_X + 20 * 8)] ldp x22, x23, [sp, #(TF_X + 22 * 8)] ldp x24, x25, [sp, #(TF_X + 24 * 8)] ldp x26, x27, [sp, #(TF_X + 26 * 8)] ldp x28, x29, [sp, #(TF_X + 28 * 8)] .else ldr x29, [sp, #(TF_X + 29 * 8)] .endif .if \el == 0 add sp, sp, #(TF_SIZE + 16) .else mov sp, x18 mrs x18, tpidr_el1 .endif .endm .macro do_ast mrs x19, daif /* Make sure the IRQs are enabled before calling ast() */ bic x19, x19, #PSR_I 1: /* Disable interrupts */ msr daifset, #2 /* Read the current thread flags */ ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */ ldr x2, [x1, #TD_FLAGS] /* Check if we have either bits set */ mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8) lsl x3, x3, #8 and x2, x2, x3 cbz x2, 2f /* Restore interrupts */ msr daif, x19 /* handle the ast */ mov x0, sp bl _C_LABEL(ast) /* Re-check for new ast scheduled */ b 1b 2: .endm ENTRY(handle_el1h_sync) save_registers 1 ldr x0, [x18, #PC_CURTHREAD] mov x1, sp bl do_el1h_sync restore_registers 1 eret END(handle_el1h_sync) ENTRY(handle_el1h_irq) save_registers 1 mov x0, sp bl intr_irq_handler restore_registers 1 eret END(handle_el1h_irq) ENTRY(handle_el0_sync) save_registers 0 ldr x0, [x18, #PC_CURTHREAD] mov x1, sp str x1, [x0, #TD_FRAME] bl do_el0_sync do_ast restore_registers 0 eret END(handle_el0_sync) ENTRY(handle_el0_irq) save_registers 0 mov x0, sp bl intr_irq_handler do_ast restore_registers 0 eret END(handle_el0_irq) ENTRY(handle_serror) save_registers 0 mov x0, sp 1: bl do_serror b 1b END(handle_serror) ENTRY(handle_empty_exception) save_registers 0 mov x0, sp 1: bl unhandled_exception b 1b END(handle_unhandled_exception) .macro vempty .align 7 b handle_empty_exception .endm .macro vector name .align 7 b handle_\name .endm .align 11 .globl exception_vectors exception_vectors: vempty /* Synchronous EL1t */ vempty /* IRQ EL1t */ vempty /* FIQ EL1t */ vempty /* Error EL1t */ vector el1h_sync /* Synchronous EL1h */ vector el1h_irq /* IRQ EL1h */ vempty /* FIQ EL1h */ vector serror /* Error EL1h */ vector el0_sync /* Synchronous 64-bit EL0 */ vector el0_irq /* IRQ 64-bit EL0 */ vempty /* FIQ 64-bit EL0 */ vector serror /* Error 64-bit EL0 */ vector el0_sync /* Synchronous 32-bit EL0 */ vector el0_irq /* IRQ 32-bit EL0 */ vempty /* FIQ 32-bit EL0 */ vector serror /* Error 32-bit EL0 */ diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c index debe63c62659..8bd4b791d417 100644 --- a/sys/arm64/arm64/genassym.c +++ b/sys/arm64/arm64/genassym.c @@ -1,72 +1,73 @@ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_SSBD, offsetof(struct pcpu, pc_ssbd)); /* Size of pcb, rounded to keep stack alignment */ ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1)); ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT); ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x)); +ASSYM(PCB_LR, offsetof(struct pcb, pcb_lr)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_TPIDRRO, offsetof(struct pcb, pcb_tpidrro_el0)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(P_MD, offsetof(struct proc, p_md)); ASSYM(MD_L0ADDR, offsetof(struct mdproc, md_l0addr)); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_FRAME, offsetof(struct thread, td_frame)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TF_SIZE, sizeof(struct trapframe)); ASSYM(TF_SP, offsetof(struct trapframe, tf_sp)); ASSYM(TF_ELR, offsetof(struct trapframe, tf_elr)); ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr)); ASSYM(TF_X, offsetof(struct trapframe, tf_x)); diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c index 90a2659d9521..c0df74bd1f6f 100644 --- a/sys/arm64/arm64/machdep.c +++ b/sys/arm64/arm64/machdep.c @@ -1,1237 +1,1237 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif static void get_fpcontext(struct thread *td, mcontext_t *mcp); static void set_fpcontext(struct thread *td, mcontext_t *mcp); enum arm64_bus arm64_bus_method = ARM64_BUS_NONE; struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; int early_boot = 1; int cold = 1; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */ int has_pan; /* * Physical address of the EFI System Table. Stashed from the metadata hints * passed into the kernel and used by the EFI code to call runtime services. */ vm_paddr_t efi_systbl_phys; /* pagezero_* implementations are provided in support.S */ void pagezero_simple(void *); void pagezero_cache(void *); /* pagezero_simple is default pagezero */ void (*pagezero)(void *p) = pagezero_simple; int (*apei_nmi)(void); static void pan_setup(void) { uint64_t id_aa64mfr1; id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE) has_pan = 1; } void pan_enable(void) { /* * The LLVM integrated assembler doesn't understand the PAN * PSTATE field. Because of this we need to manually create * the instruction in an asm block. This is equivalent to: * msr pan, #1 * * This sets the PAN bit, stopping the kernel from accessing * memory when userspace can also access it unless the kernel * uses the userspace load/store instructions. 
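
The raw opcode built just below can be decomposed with the A64 "MSR (immediate)" encoding. The bit positions in this sketch are recalled from the architecture manual rather than stated by the patch, so treat them as an assumption; the helper name is hypothetical.

#include <stdint.h>

/*
 * Assumed encoding: MSR (immediate) is 0xd500401f with op1 in bits 18:16,
 * the 4-bit immediate (CRm) in bits 11:8 and op2 in bits 7:5.  The PAN
 * pstate field is op1 = 0, op2 = 4.
 */
static inline uint32_t
msr_pstate_imm(uint32_t op1, uint32_t op2, uint32_t imm)
{
	return (0xd500401fu | (op1 << 16) | (imm << 8) | (op2 << 5));
}

/* msr pan, #1  ==  msr_pstate_imm(0, 4, 1)  ==  0xd500409f | (0x1 << 8) */
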
*/ if (has_pan) { WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN); __asm __volatile(".inst 0xd500409f | (0x1 << 8)"); } } static void cpu_startup(void *dummy) { vm_paddr_t size; int i; printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)realmem), ptoa((uintmax_t)realmem) / 1024 / 1024); if (bootverbose) { printf("Physical memory chunk(s):\n"); for (i = 0; phys_avail[i + 1] != 0; i += 2) { size = phys_avail[i + 1] - phys_avail[i]; printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n", (uintmax_t)phys_avail[i], (uintmax_t)phys_avail[i + 1] - 1, (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); } } printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)vm_free_count()), ptoa((uintmax_t)vm_free_count()) / 1024 / 1024); undef_init(); identify_cpu(); install_cpu_errata(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sp = frame->tf_sp; regs->lr = frame->tf_lr; regs->elr = frame->tf_elr; regs->spsr = frame->tf_spsr; memcpy(regs->x, frame->tf_x, sizeof(regs->x)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sp = regs->sp; frame->tf_lr = regs->lr; frame->tf_elr = regs->elr; frame->tf_spsr &= ~PSR_FLAGS; frame->tf_spsr |= regs->spsr & PSR_FLAGS; memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. 
*/ if (td == curthread) vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called fill_fpregs while the kernel is using the VFP")); memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs, sizeof(regs->fp_q)); regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr; regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr; } else #endif memset(regs, 0, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called set_fpregs while the kernel is using the VFP")); memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q)); pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr; pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr; #endif return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: fill_dbregs"); return (EDOOFUS); } int set_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: set_dbregs"); return (EDOOFUS); } #ifdef COMPAT_FREEBSD32 int fill_regs32(struct thread *td, struct reg32 *regs) { printf("ARM64TODO: fill_regs32"); return (EDOOFUS); } int set_regs32(struct thread *td, struct reg32 *regs) { printf("ARM64TODO: set_regs32"); return (EDOOFUS); } int fill_fpregs32(struct thread *td, struct fpreg32 *regs) { printf("ARM64TODO: fill_fpregs32"); return (EDOOFUS); } int set_fpregs32(struct thread *td, struct fpreg32 *regs) { printf("ARM64TODO: set_fpregs32"); return (EDOOFUS); } int fill_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: fill_dbregs32"); return (EDOOFUS); } int set_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: set_dbregs32"); return (EDOOFUS); } #endif int ptrace_set_pc(struct thread *td, u_long addr) { printf("ARM64TODO: ptrace_set_pc"); return (EDOOFUS); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); tf->tf_x[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_lr = imgp->entry_addr; tf->tf_elr = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct gpregs *)0)->gp_x); CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct reg *)0)->x); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_x[0] = 0; mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gpregs.gp_x[0] = tf->tf_x[0]; mcp->mc_gpregs.gp_spsr = tf->tf_spsr; } memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1], sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1)); mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_lr = tf->tf_lr; mcp->mc_gpregs.gp_elr = tf->tf_elr; get_fpcontext(td, mcp); return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; uint32_t spsr; spsr = mcp->mc_gpregs.gp_spsr; if ((spsr & PSR_M_MASK) != PSR_M_EL0t || (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0) return (EINVAL); memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x)); tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_lr = mcp->mc_gpregs.gp_lr; tf->tf_elr = 
mcp->mc_gpregs.gp_elr; tf->tf_spsr = mcp->mc_gpregs.gp_spsr; set_fpcontext(td, mcp); return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); curpcb = curthread->td_pcb; if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. */ vfp_save_state(td, curpcb); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called get_fpcontext while the kernel is using the VFP")); KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext")); memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs, sizeof(mcp->mc_fpregs)); mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr; mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr; mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags; mcp->mc_flags |= _MC_FP_VALID; } critical_exit(); #endif } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); if ((mcp->mc_flags & _MC_FP_VALID) != 0) { curpcb = curthread->td_pcb; /* * Discard any vfp state for the current thread, we * are about to override it. */ vfp_discard(td); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called set_fpcontext while the kernel is using the VFP")); memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q, sizeof(mcp->mc_fpregs)); curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr; curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr; curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK; } critical_exit(); #endif } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "dsb sy \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { /* We should have shutdown by now, if not enter a low power sleep */ intr_disable(); while (1) { __asm __volatile("wfi"); } } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* ARM64TODO TBD */ } /* Get current clock frequency for the given CPU ID. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { struct pcpu *pc; pc = pcpu_find(cpu_id); if (pc == NULL || rate == NULL) return (EINVAL); if (pc->pc_clock == 0) return (EOPNOTSUPP); *rate = pc->pc_clock; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; register_t daif; td = curthread; if (td->td_md.md_spinlock_count == 0) { daif = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = daif; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t daif; td = curthread; critical_exit(); daif = td->td_md.md_saved_daif; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(daif); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. 
This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { int i; - for (i = 0; i < PCB_LR; i++) + for (i = 0; i < nitems(pcb->pcb_x); i++) pcb->pcb_x[i] = tf->tf_x[i]; - pcb->pcb_x[PCB_LR] = tf->tf_lr; - pcb->pcb_pc = tf->tf_elr; + /* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */ + pcb->pcb_lr = tf->tf_elr; pcb->pcb_sp = tf->tf_sp; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack, sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ bzero(&frame, sizeof(frame)); get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_x[0]= sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { struct pcpu *pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_kstack_pages = KSTACK_PAGES; thread0.td_pcb = (struct pcb *)(thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_fpflags = 0; thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate; thread0.td_pcb->pcb_vfpcpu = UINT_MAX; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; /* Set the base address of translation table 0. 
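
The makectx() change above is what allows the debugger paths elsewhere in this diff (db_trace_thread(), stack_save_td()) to treat pcb_lr as the starting PC of a backtrace. A minimal sketch of that consumer side follows; the helper name and the stand-in structure are hypothetical, but the field accesses mirror the seeding code in this patch.

#include <sys/types.h>
#include <machine/pcb.h>	/* struct pcb: pcb_x[], pcb_lr, pcb_sp */

struct unwind_seed {
	uint64_t fp;
	uint64_t sp;
	uint64_t pc;
};

/* Hypothetical helper mirroring db_trace_thread()/stack_save_td(). */
static void
unwind_seed_from_pcb(struct unwind_seed *frame, const struct pcb *pcb)
{
	frame->sp = pcb->pcb_sp;
	frame->fp = pcb->pcb_x[29];	/* x29 is the AArch64 frame pointer */
	frame->pc = pcb->pcb_lr;	/* was pcb_x[30] before this change */
}
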
*/ thread0.td_proc->p_md.md_l0addr = READ_SPECIALREG(ttbr0_el1); } typedef struct { uint32_t type; uint64_t phys_start; uint64_t virt_start; uint64_t num_pages; uint64_t attr; } EFI_MEMORY_DESCRIPTOR; typedef void (*efi_map_entry_cb)(struct efi_md *); static void foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb) { struct efi_md *map, *p; size_t efisz; int ndesc, i; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { cb(p); } } static void exclude_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE, EXFLAG_NOALLOC); } } static void exclude_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, exclude_efi_map_entry); } static void add_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_RT_DATA: /* * Runtime data will be excluded after the DMAP * region is created to stop it from being added * to phys_avail. */ case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ physmem_hardware_region(p->md_phys, p->md_pages * PAGE_SIZE); break; } } static void add_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, add_efi_map_entry); } static void print_efi_map_entry(struct efi_md *p) { const char *type; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode", "PersistentMemory" }; if (p->md_type < nitems(types)) type = types[p->md_type]; else type = ""; printf("%23s %012lx %12p %08lx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_NV) printf("NV "); if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE) printf("MORE_RELIABLE "); if (p->md_attr & EFI_MD_ATTR_RO) printf("RO "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } static void print_efi_map_entries(struct efi_map_header *efihdr) { printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); foreach_efi_map_entry(efihdr, print_efi_map_entry); } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } 
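
The foreach_efi_map_entry() helper above decouples walking the UEFI memory map from what is done with each descriptor. As a usage illustration only (this callback is hypothetical and not part of the patch), another consumer could tally the pages in the descriptor types this file treats as usable:

#include <sys/types.h>
#include <sys/efi.h>		/* struct efi_md, EFI_MD_TYPE_* */

static uint64_t efi_usable_pages_sketch;

static void
count_usable_entry_sketch(struct efi_md *p)
{
	switch (p->md_type) {
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		efi_usable_pages_sketch += p->md_pages;
		break;
	default:
		break;
	}
}

/* Invoked the same way as the existing callbacks, e.g. from initarm():
 *	foreach_efi_map_entry(efihdr, count_usable_entry_sketch);
 */
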
#endif static bool bus_probe(void) { bool has_acpi, has_fdt; char *order, *env; has_acpi = has_fdt = false; #ifdef FDT has_fdt = (OF_peer(0) != 0); #endif #ifdef DEV_ACPI has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0); #endif env = kern_getenv("kern.cfg.order"); if (env != NULL) { order = env; while (order != NULL) { if (has_acpi && strncmp(order, "acpi", 4) == 0 && (order[4] == ',' || order[4] == '\0')) { arm64_bus_method = ARM64_BUS_ACPI; break; } if (has_fdt && strncmp(order, "fdt", 3) == 0 && (order[3] == ',' || order[3] == '\0')) { arm64_bus_method = ARM64_BUS_FDT; break; } order = strchr(order, ','); } freeenv(env); /* If we set the bus method it is valid */ if (arm64_bus_method != ARM64_BUS_NONE) return (true); } /* If no order or an invalid order was set use the default */ if (arm64_bus_method == ARM64_BUS_NONE) { if (has_fdt) arm64_bus_method = ARM64_BUS_FDT; else if (has_acpi) arm64_bus_method = ARM64_BUS_ACPI; } /* * If no option was set the default is valid, otherwise we are * setting one to get cninit() working, then calling panic to tell * the user about the invalid bus setup. */ return (env == NULL); } static void cache_setup(void) { int dcache_line_shift, icache_line_shift, dczva_line_shift; uint32_t ctr_el0; uint32_t dczid_el0; ctr_el0 = READ_SPECIALREG(ctr_el0); /* Read the log2 words in each D cache line */ dcache_line_shift = CTR_DLINE_SIZE(ctr_el0); /* Get the D cache line size */ dcache_line_size = sizeof(int) << dcache_line_shift; /* And the same for the I cache */ icache_line_shift = CTR_ILINE_SIZE(ctr_el0); icache_line_size = sizeof(int) << icache_line_shift; idcache_line_size = MIN(dcache_line_size, icache_line_size); dczid_el0 = READ_SPECIALREG(dczid_el0); /* Check if dc zva is not prohibited */ if (dczid_el0 & DCZID_DZP) dczva_line_size = 0; else { /* Same as with above calculations */ dczva_line_shift = DCZID_BS_SIZE(dczid_el0); dczva_line_size = sizeof(int) << dczva_line_shift; /* Change pagezero function */ pagezero = pagezero_cache; } } void initarm(struct arm64_bootparams *abp) { struct efi_fb *efifb; struct efi_map_header *efihdr; struct pcpu *pcpup; char *env; #ifdef FDT struct mem_region mem_regions[FDT_MEM_REGIONS]; int mem_regions_sz; #endif vm_offset_t lastaddr; caddr_t kmdp; bool valid; /* Set the module data location */ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0); link_elf_ireloc(kmdp); #ifdef FDT try_load_dtb(kmdp); #endif efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t); /* Find the address to start allocating from */ lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); /* Load the physical memory ranges */ efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) add_efi_map_entries(efihdr); #ifdef FDT else { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) panic("Cannot get physical memory regions"); physmem_hardware_regions(mem_regions, mem_regions_sz); } if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); #endif /* Exclude the EFI framebuffer from our view of physical memory. 
*/ efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); if (efifb != NULL) physmem_exclude_region(efifb->fb_addr, efifb->fb_size, EXFLAG_NOALLOC); /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); cache_setup(); pan_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt, KERNBASE - abp->kern_delta, lastaddr - KERNBASE); /* Exclude entries neexed in teh DMAP region, but not phys_avail */ if (efihdr != NULL) exclude_efi_map_entries(efihdr); physmem_init_kernel_globals(); devmap_bootstrap(0, NULL); valid = bus_probe(); cninit(); if (!valid) panic("Invalid bus configuration: %s", kern_getenv("kern.cfg.order")); init_proc0(abp->kern_stack); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); dbg_init(); kdb_init(); pan_enable(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (boothowto & RB_VERBOSE) { if (efihdr != NULL) print_efi_map_entries(efihdr); physmem_print_tables(); } early_boot = 0; } void dbg_init(void) { /* Clear OS lock */ WRITE_SPECIALREG(OSLAR_EL1, 0); /* This permits DDB to use debug registers for watchpoints. */ dbg_monitor_init(); /* TODO: Eventually will need to initialize debug registers here. */ } #ifdef DDB #include DB_SHOW_COMMAND(specialregs, db_show_spregs) { #define PRINT_REG(reg) \ db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg)) PRINT_REG(actlr_el1); PRINT_REG(afsr0_el1); PRINT_REG(afsr1_el1); PRINT_REG(aidr_el1); PRINT_REG(amair_el1); PRINT_REG(ccsidr_el1); PRINT_REG(clidr_el1); PRINT_REG(contextidr_el1); PRINT_REG(cpacr_el1); PRINT_REG(csselr_el1); PRINT_REG(ctr_el0); PRINT_REG(currentel); PRINT_REG(daif); PRINT_REG(dczid_el0); PRINT_REG(elr_el1); PRINT_REG(esr_el1); PRINT_REG(far_el1); #if 0 /* ARM64TODO: Enable VFP before reading floating-point registers */ PRINT_REG(fpcr); PRINT_REG(fpsr); #endif PRINT_REG(id_aa64afr0_el1); PRINT_REG(id_aa64afr1_el1); PRINT_REG(id_aa64dfr0_el1); PRINT_REG(id_aa64dfr1_el1); PRINT_REG(id_aa64isar0_el1); PRINT_REG(id_aa64isar1_el1); PRINT_REG(id_aa64pfr0_el1); PRINT_REG(id_aa64pfr1_el1); PRINT_REG(id_afr0_el1); PRINT_REG(id_dfr0_el1); PRINT_REG(id_isar0_el1); PRINT_REG(id_isar1_el1); PRINT_REG(id_isar2_el1); PRINT_REG(id_isar3_el1); PRINT_REG(id_isar4_el1); PRINT_REG(id_isar5_el1); PRINT_REG(id_mmfr0_el1); PRINT_REG(id_mmfr1_el1); PRINT_REG(id_mmfr2_el1); PRINT_REG(id_mmfr3_el1); #if 0 /* Missing from llvm */ PRINT_REG(id_mmfr4_el1); #endif PRINT_REG(id_pfr0_el1); PRINT_REG(id_pfr1_el1); PRINT_REG(isr_el1); PRINT_REG(mair_el1); PRINT_REG(midr_el1); PRINT_REG(mpidr_el1); PRINT_REG(mvfr0_el1); PRINT_REG(mvfr1_el1); PRINT_REG(mvfr2_el1); PRINT_REG(revidr_el1); PRINT_REG(sctlr_el1); PRINT_REG(sp_el0); PRINT_REG(spsel); PRINT_REG(spsr_el1); PRINT_REG(tcr_el1); PRINT_REG(tpidr_el0); PRINT_REG(tpidr_el1); PRINT_REG(tpidrro_el0); PRINT_REG(ttbr0_el1); PRINT_REG(ttbr1_el1); PRINT_REG(vbar_el1); #undef PRINT_REG } DB_SHOW_COMMAND(vtop, db_show_vtop) { uint64_t phys; if (have_addr) { phys = arm64_address_translate_s1e1r(addr); db_printf("EL1 physical address reg (read): 0x%016lx\n", phys); phys = arm64_address_translate_s1e1w(addr); db_printf("EL1 
physical address reg (write): 0x%016lx\n", phys); phys = arm64_address_translate_s1e0r(addr); db_printf("EL0 physical address reg (read): 0x%016lx\n", phys); phys = arm64_address_translate_s1e0w(addr); db_printf("EL0 physical address reg (write): 0x%016lx\n", phys); } else db_printf("show vtop \n"); } #endif diff --git a/sys/arm64/arm64/stack_machdep.c b/sys/arm64/arm64/stack_machdep.c index 0212c6335a05..feba7e41286d 100644 --- a/sys/arm64/arm64/stack_machdep.c +++ b/sys/arm64/arm64/stack_machdep.c @@ -1,95 +1,95 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include static void stack_capture(struct stack *st, struct unwind_state *frame) { stack_zero(st); while (1) { unwind_frame(frame); if (!INKERNEL((vm_offset_t)frame->fp) || !INKERNEL((vm_offset_t)frame->pc)) break; if (stack_put(st, frame->pc) == -1) break; } } void stack_save_td(struct stack *st, struct thread *td) { struct unwind_state frame; if (TD_IS_SWAPPED(td)) panic("stack_save_td: swapped"); if (TD_IS_RUNNING(td)) panic("stack_save_td: running"); frame.sp = td->td_pcb->pcb_sp; frame.fp = td->td_pcb->pcb_x[29]; - frame.pc = td->td_pcb->pcb_x[30]; + frame.pc = td->td_pcb->pcb_lr; stack_capture(st, &frame); } int stack_save_td_running(struct stack *st, struct thread *td) { return (EOPNOTSUPP); } void stack_save(struct stack *st) { struct unwind_state frame; uint64_t sp; __asm __volatile("mov %0, sp" : "=&r" (sp)); frame.sp = sp; frame.fp = (uint64_t)__builtin_frame_address(0); frame.pc = (uint64_t)stack_save; stack_capture(st, &frame); } diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S index d9921f7e9528..bc87114dd0ee 100644 --- a/sys/arm64/arm64/swtch.S +++ b/sys/arm64/arm64/swtch.S @@ -1,292 +1,292 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "assym.inc" #include "opt_kstack_pages.h" #include "opt_sched.h" #include __FBSDID("$FreeBSD$"); .macro clear_step_flag pcbflags, tmp tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f mrs \tmp, mdscr_el1 bic \tmp, \tmp, #1 msr mdscr_el1, \tmp isb 999: .endm .macro set_step_flag pcbflags, tmp tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f mrs \tmp, mdscr_el1 orr \tmp, \tmp, #1 msr mdscr_el1, \tmp isb 999: .endm /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) /* Of old == NULL skip disabling stepping */ cbz x0, 1f /* If we were single stepping, disable it */ ldr x4, [x0, #TD_PCB] ldr w5, [x4, #PCB_FLAGS] clear_step_flag w5, x6 1: #ifdef VFP /* Backup the new thread pointer around a call to C code */ mov x19, x0 mov x20, x1 bl vfp_discard mov x1, x20 mov x0, x19 #endif bl pmap_switch mov x4, x0 /* If we are single stepping, enable it */ ldr w5, [x4, #PCB_FLAGS] set_step_flag w5, x6 /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 msr tpidr_el0, x6 ldr x6, [x4, #PCB_TPIDRRO] msr tpidrro_el0, x6 ldp x8, x9, [x4, #PCB_REGS + 8 * 8] ldp x10, x11, [x4, #PCB_REGS + 10 * 8] ldp x12, x13, [x4, #PCB_REGS + 12 * 8] ldp x14, x15, [x4, #PCB_REGS + 14 * 8] ldp x16, x17, [x4, #PCB_REGS + 16 * 8] ldr x19, [x4, #PCB_REGS + 19 * 8] ldp x20, x21, [x4, #PCB_REGS + 20 * 8] ldp x22, x23, [x4, #PCB_REGS + 22 * 8] ldp x24, x25, [x4, #PCB_REGS + 24 * 8] ldp x26, x27, [x4, #PCB_REGS + 26 * 8] ldp x28, x29, [x4, #PCB_REGS + 28 * 8] - ldr x30, [x4, #PCB_REGS + 30 * 8] + ldr lr, [x4, #PCB_LR] ret END(cpu_throw) /* * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx) * * x0 = old * x1 = new * x2 = mtx * x3 to x7, x16 and x17 are caller saved */ ENTRY(cpu_switch) /* * Save the old context. 
*/ ldr x4, [x0, #TD_PCB] /* Store the callee-saved registers */ stp x8, x9, [x4, #PCB_REGS + 8 * 8] stp x10, x11, [x4, #PCB_REGS + 10 * 8] stp x12, x13, [x4, #PCB_REGS + 12 * 8] stp x14, x15, [x4, #PCB_REGS + 14 * 8] stp x16, x17, [x4, #PCB_REGS + 16 * 8] stp x18, x19, [x4, #PCB_REGS + 18 * 8] stp x20, x21, [x4, #PCB_REGS + 20 * 8] stp x22, x23, [x4, #PCB_REGS + 22 * 8] stp x24, x25, [x4, #PCB_REGS + 24 * 8] stp x26, x27, [x4, #PCB_REGS + 26 * 8] stp x28, x29, [x4, #PCB_REGS + 28 * 8] - str x30, [x4, #PCB_REGS + 30 * 8] + str lr, [x4, #PCB_LR] /* And the old stack pointer */ mov x5, sp mrs x6, tpidrro_el0 str x6, [x4, #PCB_TPIDRRO] mrs x6, tpidr_el0 stp x5, x6, [x4, #PCB_SP] /* If we were single stepping, disable it */ ldr w5, [x4, #PCB_FLAGS] clear_step_flag w5, x6 mov x19, x0 mov x20, x1 mov x21, x2 #ifdef VFP /* Load the pcb address */ mov x1, x4 bl vfp_save_state mov x1, x20 mov x0, x19 #endif bl pmap_switch /* Move the new pcb out of the way */ mov x4, x0 mov x2, x21 mov x1, x20 mov x0, x19 /* * Release the old thread. */ stlr x2, [x0, #TD_LOCK] #if defined(SCHED_ULE) && defined(SMP) /* Spin if TD_LOCK points to a blocked_lock */ ldr x2, =_C_LABEL(blocked_lock) 1: ldar x3, [x1, #TD_LOCK] cmp x3, x2 b.eq 1b #endif /* If we are single stepping, enable it */ ldr w5, [x4, #PCB_FLAGS] set_step_flag w5, x6 /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 msr tpidr_el0, x6 ldr x6, [x4, #PCB_TPIDRRO] msr tpidrro_el0, x6 ldp x8, x9, [x4, #PCB_REGS + 8 * 8] ldp x10, x11, [x4, #PCB_REGS + 10 * 8] ldp x12, x13, [x4, #PCB_REGS + 12 * 8] ldp x14, x15, [x4, #PCB_REGS + 14 * 8] ldp x16, x17, [x4, #PCB_REGS + 16 * 8] ldr x19, [x4, #PCB_REGS + 19 * 8] ldp x20, x21, [x4, #PCB_REGS + 20 * 8] ldp x22, x23, [x4, #PCB_REGS + 22 * 8] ldp x24, x25, [x4, #PCB_REGS + 24 * 8] ldp x26, x27, [x4, #PCB_REGS + 26 * 8] ldp x28, x29, [x4, #PCB_REGS + 28 * 8] - ldr x30, [x4, #PCB_REGS + 30 * 8] + ldr lr, [x4, #PCB_LR] str xzr, [x4, #PCB_REGS + 18 * 8] ret .Lcpu_switch_panic_str: .asciz "cpu_switch: %p\0" END(cpu_switch) ENTRY(fork_trampoline) mov x0, x8 mov x1, x9 mov x2, sp mov fp, #0 /* Stack traceback stops here. */ bl _C_LABEL(fork_exit) /* Restore the registers other than x0 and x1 */ ldp x2, x3, [sp, #TF_X + 2 * 8] ldp x4, x5, [sp, #TF_X + 4 * 8] ldp x6, x7, [sp, #TF_X + 6 * 8] ldp x8, x9, [sp, #TF_X + 8 * 8] ldp x10, x11, [sp, #TF_X + 10 * 8] ldp x12, x13, [sp, #TF_X + 12 * 8] ldp x14, x15, [sp, #TF_X + 14 * 8] ldp x16, x17, [sp, #TF_X + 16 * 8] ldr x19, [sp, #TF_X + 19 * 8] ldp x20, x21, [sp, #TF_X + 20 * 8] ldp x22, x23, [sp, #TF_X + 22 * 8] ldp x24, x25, [sp, #TF_X + 24 * 8] ldp x26, x27, [sp, #TF_X + 26 * 8] ldp x28, x29, [sp, #TF_X + 28 * 8] /* * Disable interrupts to avoid * overwriting spsr_el1 and sp_el0 by an IRQ exception. */ msr daifset, #2 /* Restore sp and lr */ ldp x0, x1, [sp, #TF_SP] msr sp_el0, x0 mov lr, x1 /* Restore elr and spsr */ ldp x0, x1, [sp, #TF_ELR] msr elr_el1, x0 msr spsr_el1, x1 /* Finally x0 and x1 */ ldp x0, x1, [sp, #TF_X + 0 * 8] ldr x18, [sp, #TF_X + 18 * 8] /* * No need for interrupts reenabling since PSR * will be set to the desired value anyway. 
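
fork_trampoline above is the first code a new thread executes: cpu_switch()/cpu_throw() load lr from pcb_lr (pointed at fork_trampoline by cpu_fork(), cpu_copy_thread() and cpu_fork_kthread_handler() later in this diff) and the final ret lands here with the staged values in x8/x9. In rough C terms, as a sketch only (the function name is hypothetical; fork_exit() is the MI helper the trampoline calls):

struct trapframe;

void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);

static void
fork_trampoline_sketch(void (*callout)(void *, struct trapframe *),	/* x8 <- pcb_x[8] */
    void *arg,				/* x9 <- pcb_x[9] */
    struct trapframe *tf)		/* the trapframe on the kernel stack */
{
	fork_exit(callout, arg, tf);
	/*
	 * When fork_exit() returns, the assembly above restores the
	 * trapframe and erets, either to userland or into a kthread.
	 */
}
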
*/ eret END(fork_trampoline) ENTRY(savectx) /* Store the callee-saved registers */ stp x8, x9, [x0, #PCB_REGS + 8 * 8] stp x10, x11, [x0, #PCB_REGS + 10 * 8] stp x12, x13, [x0, #PCB_REGS + 12 * 8] stp x14, x15, [x0, #PCB_REGS + 14 * 8] stp x16, x17, [x0, #PCB_REGS + 16 * 8] stp x18, x19, [x0, #PCB_REGS + 18 * 8] stp x20, x21, [x0, #PCB_REGS + 20 * 8] stp x22, x23, [x0, #PCB_REGS + 22 * 8] stp x24, x25, [x0, #PCB_REGS + 24 * 8] stp x26, x27, [x0, #PCB_REGS + 26 * 8] stp x28, x29, [x0, #PCB_REGS + 28 * 8] - str x30, [x0, #PCB_REGS + 30 * 8] + str lr, [x0, #PCB_LR] /* And the old stack pointer */ mov x5, sp mrs x6, tpidrro_el0 str x6, [x0, #PCB_TPIDRRO] mrs x6, tpidr_el0 stp x5, x6, [x0, #PCB_SP] /* Store the VFP registers */ #ifdef VFP mov x28, lr mov x1, x0 /* move pcb to the correct register */ mov x0, xzr /* td = NULL */ bl vfp_save_state mov lr, x28 #endif ret END(savectx) diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c index 8f0dfa1a15e7..1d6ccace82ed 100644 --- a/sys/arm64/arm64/vm_machdep.c +++ b/sys/arm64/arm64/vm_machdep.c @@ -1,288 +1,288 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; if ((flags & RFPROC) == 0) return; if (td1 == curthread) { /* * Save the tpidr_el0 and the vfp state, these normally happen * in cpu_switch, but if userland changes these then forks * this may not have happened. 
*/ td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0); #ifdef VFP if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0) vfp_save_state(td1, td1->td_pcb); #endif } pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); td2->td_proc->p_md.md_l0addr = vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l0); tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1); bcopy(td1->td_frame, tf, sizeof(*tf)); tf->tf_x[0] = 0; tf->tf_x[1] = 0; tf->tf_spsr = 0; td2->td_frame = tf; /* Set the return value registers for fork() */ td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return; td2->td_pcb->pcb_x[9] = (uintptr_t)td2; - td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; + td2->td_pcb->pcb_lr = (uintptr_t)fork_trampoline; td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame; td2->td_pcb->pcb_fpusaved = &td2->td_pcb->pcb_fpustate; td2->td_pcb->pcb_vfpcpu = UINT_MAX; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_daif = td1->td_md.md_saved_daif & ~DAIF_I_MASKED; } void cpu_reset(void) { psci_reset(); printf("cpu_reset failed"); while(1) __asm volatile("wfi" ::: "memory"); } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; switch (error) { case 0: frame->tf_x[0] = td->td_retval[0]; frame->tf_x[1] = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: frame->tf_elr -= 4; break; case EJUSTRETURN: break; default: frame->tf_spsr |= PSR_C; /* carry bit */ frame->tf_x[0] = SV_ABI_ERRNO(td->td_proc, error); break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_x[8] = (uintptr_t)fork_return; td->td_pcb->pcb_x[9] = (uintptr_t)td; - td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; + td->td_pcb->pcb_lr = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate; td->td_pcb->pcb_vfpcpu = UINT_MAX; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = td0->td_md.md_saved_daif & ~DAIF_I_MASKED; } /* * Set that machine state for performing an upcall that starts * the entry function with the given argument. 
*/ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); tf->tf_elr = (register_t)entry; tf->tf_x[0] = (register_t)arg; } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct pcb *pcb; if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); pcb = td->td_pcb; pcb->pcb_tpidr_el0 = (register_t)tls_base; if (td == curthread) WRITE_SPECIALREG(tpidr_el0, tls_base); return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)STACKALIGN( (struct trapframe *)td->td_pcb - 1); } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_x[8] = (uintptr_t)func; td->td_pcb->pcb_x[9] = (uintptr_t)arg; - td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; + td->td_pcb->pcb_lr = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate; td->td_pcb->pcb_vfpcpu = UINT_MAX; } void cpu_exit(struct thread *td) { } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void swi_vm(void *v) { if (busdma_swi_pending != 0) busdma_swi(); } diff --git a/sys/arm64/include/db_machdep.h b/sys/arm64/include/db_machdep.h index 45d548c750bc..f2fd2a57a9c3 100644 --- a/sys/arm64/include/db_machdep.h +++ b/sys/arm64/include/db_machdep.h @@ -1,123 +1,123 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_DB_MACHDEP_H_ #define _MACHINE_DB_MACHDEP_H_ #include #include #include #define T_BREAKPOINT (EXCP_BRK) #define T_WATCHPOINT (EXCP_WATCHPT_EL1) typedef vm_offset_t db_addr_t; typedef long db_expr_t; -#define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_pc) +#define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_lr) #define BKPT_INST (0xd4200000) #define BKPT_SIZE (4) #define BKPT_SET(inst) (BKPT_INST) #define BKPT_SKIP do { \ kdb_frame->tf_elr += BKPT_SIZE; \ } while (0) #define db_clear_single_step kdb_cpu_clear_singlestep #define db_set_single_step kdb_cpu_set_singlestep #define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT) #define IS_WATCHPOINT_TRAP(type, code) (type == T_WATCHPOINT) #define inst_trap_return(ins) (0) /* ret */ #define inst_return(ins) (((ins) & 0xfffffc1fu) == 0xd65f0000) #define inst_call(ins) (((ins) & 0xfc000000u) == 0x94000000u || /* BL */ \ ((ins) & 0xfffffc1fu) == 0xd63f0000u) /* BLR */ #define inst_load(ins) ({ \ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \ is_load_instr(tmp_instr); \ }) #define inst_store(ins) ({ \ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \ is_store_instr(tmp_instr); \ }) #define is_load_instr(ins) ((((ins) & 0x3b000000u) == 0x18000000u) || /* literal */ \ (((ins) & 0x3f400000u) == 0x08400000u) || /* exclusive */ \ (((ins) & 0x3bc00000u) == 0x28400000u) || /* no-allocate pair */ \ ((((ins) & 0x3b200c00u) == 0x38000400u) && \ (((ins) & 0x3be00c00u) != 0x38000400u) && \ (((ins) & 0xffe00c00u) != 0x3c800400u)) || /* immediate post-indexed */ \ ((((ins) & 0x3b200c00u) == 0x38000c00u) && \ (((ins) & 0x3be00c00u) != 0x38000c00u) && \ (((ins) & 0xffe00c00u) != 0x3c800c00u)) || /* immediate pre-indexed */ \ ((((ins) & 0x3b200c00u) == 0x38200800u) && \ (((ins) & 0x3be00c00u) != 0x38200800u) && \ (((ins) & 0xffe00c00u) != 0x3ca00c80u)) || /* register offset */ \ ((((ins) & 0x3b200c00u) == 0x38000800u) && \ (((ins) & 0x3be00c00u) != 0x38000800u)) || /* unprivileged */ \ ((((ins) & 0x3b200c00u) == 0x38000000u) && \ (((ins) & 0x3be00c00u) != 0x38000000u) && \ (((ins) & 0xffe00c00u) != 0x3c800000u)) || /* unscaled immediate */ \ ((((ins) & 0x3b000000u) == 0x39000000u) && \ (((ins) & 0x3bc00000u) != 0x39000000u) && \ (((ins) & 0xffc00000u) != 0x3d800000u)) && /* unsigned immediate */ \ (((ins) & 0x3bc00000u) == 0x28400000u) || /* pair (offset) */ \ (((ins) & 0x3bc00000u) == 0x28c00000u) || /* pair (post-indexed) */ \ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */ #define is_store_instr(ins) ((((ins) & 0x3f400000u) == 0x08000000u) || /* exclusive */ \ (((ins) & 0x3bc00000u) == 0x28000000u) || /* no-allocate pair */ \ ((((ins) & 0x3be00c00u) == 0x38000400u) || \ (((ins) & 0xffe00c00u) == 0x3c800400u)) || /* immediate post-indexed */ \ ((((ins) & 0x3be00c00u) == 0x38000c00u) || \ (((ins) & 0xffe00c00u) == 0x3c800c00u)) || /* immediate pre-indexed */ \ ((((ins) & 0x3be00c00u) == 0x38200800u) || \ (((ins) & 0xffe00c00u) == 0x3ca00800u)) || /* register offset */ \ (((ins) & 0x3be00c00u) == 0x38000800u) || /* unprivileged */ \ ((((ins) & 0x3be00c00u) == 0x38000000u) || \ (((ins) & 0xffe00c00u) == 0x3c800000u)) || /* unscaled immediate */ \ ((((ins) & 0x3bc00000u) == 0x39000000u) || \ (((ins) & 0xffc00000u) == 0x3d800000u)) || /* unsigned immediate */ \ (((ins) & 0x3bc00000u) == 0x28000000u) || /* pair (offset) */ \ (((ins) & 0x3bc00000u) == 0x28800000u) || /* pair (post-indexed) */ \ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */ #define 
next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + 4)) #define DB_ELFSIZE 64 #endif /* !_MACHINE_DB_MACHDEP_H_ */ diff --git a/sys/arm64/include/pcb.h b/sys/arm64/include/pcb.h index bd8b1b4235a8..cbf43133e9ad 100644 --- a/sys/arm64/include/pcb.h +++ b/sys/arm64/include/pcb.h @@ -1,78 +1,78 @@ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PCB_H_ #define _MACHINE_PCB_H_ #ifndef LOCORE #include struct trapframe; -#define PCB_LR 30 struct pcb { - uint64_t pcb_x[31]; - uint64_t pcb_pc; + uint64_t pcb_x[30]; + uint64_t pcb_lr; + uint64_t _reserved; /* Was pcb_pc */ /* These two need to be in order as we access them together */ uint64_t pcb_sp; uint64_t pcb_tpidr_el0; uint64_t pcb_tpidrro_el0; /* Fault handler, the error value is passed in x0 */ vm_offset_t pcb_onfault; u_int pcb_flags; #define PCB_SINGLE_STEP_SHIFT 0 #define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT) struct vfpstate *pcb_fpusaved; int pcb_fpflags; #define PCB_FP_STARTED 0x01 #define PCB_FP_KERN 0x02 #define PCB_FP_NOSAVE 0x04 /* The bits passed to userspace in get_fpcontext */ #define PCB_FP_USERMASK (PCB_FP_STARTED) u_int pcb_vfpcpu; /* Last cpu this thread ran VFP code */ /* * The userspace VFP state. The pcb_fpusaved pointer will point to * this unless the kernel has allocated a VFP context. * Place last to simplify the asm to access the rest if the struct. */ struct vfpstate pcb_fpustate; }; #ifdef _KERNEL void makectx(struct trapframe *tf, struct pcb *pcb); int savectx(struct pcb *pcb) __returns_twice; #endif #endif /* !LOCORE */ #endif /* !_MACHINE_PCB_H_ */
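
A compile-time restatement of the layout change at the end of this diff: pcb_x shrinks to x0-x29, pcb_lr takes the slot that used to be pcb_x[30], and the reserved word that replaces pcb_pc keeps pcb_sp (and everything the assembly reaches via PCB_SP) at an unchanged offset. The assertions below are a sketch of what could sit alongside the existing CTASSERTs in machdep.c; the include list is approximate.

#include <sys/param.h>
#include <sys/systm.h>		/* CTASSERT() */
#include <machine/pcb.h>

/* pcb_lr occupies the slot formerly known as pcb_x[30]... */
CTASSERT(offsetof(struct pcb, pcb_lr) ==
    offsetof(struct pcb, pcb_x) + 30 * 8);
/* ...and the reserved word keeps pcb_sp where the PCB_SP users expect it. */
CTASSERT(offsetof(struct pcb, pcb_sp) ==
    offsetof(struct pcb, pcb_x) + 32 * 8);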