Index: head/sys/arm64/arm64/copyinout.S =================================================================== --- head/sys/arm64/arm64/copyinout.S (revision 316755) +++ head/sys/arm64/arm64/copyinout.S (revision 316756) @@ -1,221 +1,226 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include "assym.s" /* * Fault handler for the copy{in,out} functions below. 
 */
ENTRY(copyio_fault)
	SET_FAULT_HANDLER(xzr, x1)	/* Clear the handler */
+	EXIT_USER_ACCESS_CHECK(w0, x1)
copyio_fault_nopcb:
	mov	x0, #EFAULT
	ret
END(copyio_fault)

/*
 * Copies from a kernel to user address
 *
 * int copyout(const void *kaddr, void *udaddr, size_t len)
 */
ENTRY(copyout)
	cbz	x2, 1f
	adds	x3, x1, x2
	b.cs	copyio_fault_nopcb
	ldr	x4, =VM_MAXUSER_ADDRESS
	cmp	x3, x4
	b.hi	copyio_fault_nopcb

	b	copycommon
1:	mov	x0, xzr		/* return 0 */
	ret
END(copyout)

/*
 * Copies from a user to kernel address
 *
 * int copyin(const void *uaddr, void *kdaddr, size_t len)
 */
ENTRY(copyin)
	cbz	x2, 1f
	adds	x3, x0, x2
	b.cs	copyio_fault_nopcb
	ldr	x4, =VM_MAXUSER_ADDRESS
	cmp	x3, x4
	b.hi	copyio_fault_nopcb

	b	copycommon
1:	mov	x0, xzr		/* return 0 */
	ret
END(copyin)

/*
 * Copies a string from a user to kernel address
 *
 * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
 */
ENTRY(copyinstr)
	mov	x5, xzr		/* count = 0 */
	mov	w4, #1		/* If zero return failure */
	cbz	x2, 3f		/* If len == 0 then skip loop */

	adr	x6, copyio_fault /* Get the handler address */
	SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+	ENTER_USER_ACCESS(w6, x7)

	ldr	x7, =VM_MAXUSER_ADDRESS
1:	cmp	x0, x7
	b.cs	copyio_fault
	ldtrb	w4, [x0]	/* Load from uaddr */
	add	x0, x0, #1	/* Next char */
	strb	w4, [x1], #1	/* Store in kaddr */
	add	x5, x5, #1	/* count++ */
	cbz	w4, 2f		/* Break when NUL-terminated */
	sub	x2, x2, #1	/* len-- */
	cbnz	x2, 1b

-2:	SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+2:	EXIT_USER_ACCESS(w6)
+	SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
3:	cbz	x3, 4f		/* Check if done != NULL */
	str	x5, [x3]	/* done = count */

4:	mov	w1, #ENAMETOOLONG /* Load ENAMETOOLONG to return if failed */
	cmp	w4, #0		/* Check if we saved the NUL-terminator */
	csel	w0, wzr, w1, eq	/* If so return success, else failure */
	ret
END(copyinstr)

/*
 * Local helper
 *
 * x0 - src pointer
 * x1 - dst pointer
 * x2 - size
 * lr - the return address, so jump here instead of calling
 *
 * This function is optimized to minimize concurrent memory accesses. In
 * present form it is suited for cores with a single memory prefetching
 * unit.
 * ARM64TODO:
 * Consider using separate functions for each ARM64 core. Adding memory
 * access interleaving might increase a total throughput on A57 or A72.
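 *
 * A minimal C model of the tiers implemented below -- a sketch for
 * illustration only (copy_tiers is not a name from the source):
 *
 *	static void
 *	copy_tiers(uint8_t *d, const uint8_t *s, size_t n)
 *	{
 *		if (((uintptr_t)d | (uintptr_t)s) & 7) {
 *			while (n-- > 0)			// byte_by_byte
 *				*d++ = *s++;
 *			return;
 *		}
 *		for (; n >= 64; n -= 64) {		// by_blocks
 *			memcpy(d, s, 64); d += 64; s += 64;
 *		}
 *		for (; n >= 16; n -= 16) {		// by_dwords
 *			memcpy(d, s, 16); d += 16; s += 16;
 *		}
 *		if (n & 8) { memcpy(d, s, 8); d += 8; s += 8; }	// lead_out
 *		if (n & 4) { memcpy(d, s, 4); d += 4; s += 4; }
 *		if (n & 2) { memcpy(d, s, 2); d += 2; s += 2; }
 *		if (n & 1) *d = *s;
 *	}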
*/ .text .align 4 .local copycommon .type copycommon,@function copycommon: adr x6, copyio_fault /* Get the handler address */ SET_FAULT_HANDLER(x6, x7) /* Set the handler */ + ENTER_USER_ACCESS(w6, x7) - /* Check alignment */ orr x3, x0, x1 ands x3, x3, 0x07 b.eq aligned /* Unaligned is byte by byte copy */ byte_by_byte: ldrb w3, [x0], #0x01 strb w3, [x1], #0x01 subs x2, x2, #0x01 b.ne byte_by_byte b ending aligned: cmp x2, #0x10 b.lt lead_out cmp x2, #0x40 b.lt by_dwords_start /* Block copy */ lsr x15, x2, #0x06 by_blocks: ldp x3, x4, [x0], #0x10 ldp x5, x6, [x0], #0x10 ldp x7, x8, [x0], #0x10 ldp x9, x10, [x0], #0x10 stp x3, x4, [x1], #0x10 stp x5, x6, [x1], #0x10 stp x7, x8, [x1], #0x10 stp x9, x10, [x1], #0x10 subs x15, x15, #0x01 b.ne by_blocks and x2, x2, #0x3f by_dwords_start: lsr x15, x2, #0x04 cbz x15, lead_out by_dwords: ldp x3, x4, [x0], #0x10 stp x3, x4, [x1], #0x10 subs x15, x15, #0x01 b.ne by_dwords /* Less than 16 bytes to copy */ lead_out: tbz x2, #0x03, last_word ldr x3, [x0], #0x08 str x3, [x1], #0x08 last_word: tbz x2, #0x02, last_hword ldr w3, [x0], #0x04 str w3, [x1], #0x04 last_hword: tbz x2, #0x01, last_byte ldrh w3, [x0], #0x02 strh w3, [x1], #0x02 last_byte: tbz x2, #0x00, ending ldrb w3, [x0] strb w3, [x1] ending: + EXIT_USER_ACCESS_CHECK(w6, x7) SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */ mov x0, xzr /* return 0 */ ret .size copycommon, . - copycommon Index: head/sys/arm64/arm64/machdep.c =================================================================== --- head/sys/arm64/arm64/machdep.c (revision 316755) +++ head/sys/arm64/arm64/machdep.c (revision 316756) @@ -1,1116 +1,1150 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif enum arm64_bus arm64_bus_method = ARM64_BUS_NONE; struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2]; vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2]; int early_boot = 1; int cold = 1; long realmem = 0; long Maxmem = 0; #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t physmap[PHYSMAP_SIZE]; u_int physmap_idx; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */ +int has_pan; /* pagezero_* implementations are provided in support.S */ void pagezero_simple(void *); void pagezero_cache(void *); /* pagezero_simple is default pagezero */ void (*pagezero)(void *p) = pagezero_simple; static void +pan_setup(void) +{ + uint64_t id_aa64mfr1; + + id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); + if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE) + has_pan = 1; +} + +void +pan_enable(void) +{ + + /* + * The LLVM integrated assembler doesn't understand the PAN + * PSTATE field. Because of this we need to manually create + * the instruction in an asm block. This is equivalent to: + * msr pan, #1 + * + * This sets the PAN bit, stopping the kernel from accessing + * memory when userspace can also access it unless the kernel + * uses the userspace load/store instructions. + */ + if (has_pan) { + WRITE_SPECIALREG(sctlr_el1, + READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN); + __asm __volatile(".inst 0xd500409f | (0x1 << 8)"); + } +} + +static void cpu_startup(void *dummy) { identify_cpu(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sp = frame->tf_sp; regs->lr = frame->tf_lr; regs->elr = frame->tf_elr; regs->spsr = frame->tf_spsr; memcpy(regs->x, frame->tf_x, sizeof(regs->x)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sp = regs->sp; frame->tf_lr = regs->lr; frame->tf_elr = regs->elr; frame->tf_spsr = regs->spsr; memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. 
*/ vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called fill_fpregs while the kernel is using the VFP")); memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs, sizeof(regs->fp_q)); regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr; regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr; } else #endif memset(regs->fp_q, 0, sizeof(regs->fp_q)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called set_fpregs while the kernel is using the VFP")); memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q)); pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr; pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr; #endif return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { panic("ARM64TODO: fill_dbregs"); } int set_dbregs(struct thread *td, struct dbreg *regs) { panic("ARM64TODO: set_dbregs"); } int ptrace_set_pc(struct thread *td, u_long addr) { panic("ARM64TODO: ptrace_set_pc"); return (0); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); /* * We need to set x0 for init as it doesn't call * cpu_set_syscall_retval to copy the value. We also * need to set td_retval for the cases where we do. */ tf->tf_x[0] = td->td_retval[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_lr = imgp->entry_addr; tf->tf_elr = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct gpregs *)0)->gp_x); CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct reg *)0)->x); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_x[0] = 0; mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gpregs.gp_x[0] = tf->tf_x[0]; mcp->mc_gpregs.gp_spsr = tf->tf_spsr; } memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1], sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1)); mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_lr = tf->tf_lr; mcp->mc_gpregs.gp_elr = tf->tf_elr; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x)); tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_lr = mcp->mc_gpregs.gp_lr; tf->tf_elr = mcp->mc_gpregs.gp_elr; tf->tf_spsr = mcp->mc_gpregs.gp_spsr; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); curpcb = curthread->td_pcb; if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. 
*/ vfp_save_state(td, curpcb); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called get_fpcontext while the kernel is using the VFP")); KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext")); memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs, sizeof(mcp->mc_fpregs)); mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr; mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr; mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags; mcp->mc_flags |= _MC_FP_VALID; } critical_exit(); #endif } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); if ((mcp->mc_flags & _MC_FP_VALID) != 0) { curpcb = curthread->td_pcb; /* * Discard any vfp state for the current thread, we * are about to override it. */ vfp_discard(td); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called set_fpcontext while the kernel is using the VFP")); memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q, sizeof(mcp->mc_fpregs)); curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr; curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr; curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK; } critical_exit(); #endif } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "dsb sy \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { /* We should have shutdown by now, if not enter a low power sleep */ intr_disable(); while (1) { __asm __volatile("wfi"); } } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* ARM64TODO TBD */ } /* Get current clock frequency for the given CPU ID. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { struct pcpu *pc; pc = pcpu_find(cpu_id); if (pc == NULL || rate == NULL) return (EINVAL); if (pc->pc_clock == 0) return (EOPNOTSUPP); *rate = pc->pc_clock; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; register_t daif; td = curthread; if (td->td_md.md_spinlock_count == 0) { daif = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = daif; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t daif; td = curthread; critical_exit(); daif = td->td_md.md_saved_daif; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(daif); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; uint32_t spsr; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); spsr = uc.uc_mcontext.mc_gpregs.gp_spsr; if ((spsr & PSR_M_MASK) != PSR_M_EL0t || (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0) return (EINVAL); set_mcontext(td, &uc.uc_mcontext); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { int i; for (i = 0; i < PCB_LR; i++) pcb->pcb_x[i] = tf->tf_x[i]; pcb->pcb_x[PCB_LR] = tf->tf_lr; pcb->pcb_pc = tf->tf_elr; pcb->pcb_sp = tf->tf_sp; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int code, onstack, sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_x[0]= sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { struct pcpu *pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; thread0.td_pcb->pcb_fpflags = 0; thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate; thread0.td_pcb->pcb_vfpcpu = UINT_MAX; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } typedef struct { uint32_t type; uint64_t phys_start; uint64_t virt_start; uint64_t num_pages; uint64_t attr; } EFI_MEMORY_DESCRIPTOR; static int add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, u_int *physmap_idxp) { u_int i, insert_idx, _physmap_idx; _physmap_idx = *physmap_idxp; if (length == 0) return (1); /* * Find insertion point while checking for overlap. Start off by * assuming the new entry will be added to the end. */ insert_idx = _physmap_idx; for (i = 0; i <= _physmap_idx; i += 2) { if (base < physmap[i + 1]) { if (base + length <= physmap[i]) { insert_idx = i; break; } if (boothowto & RB_VERBOSE) printf( "Overlapping memory regions, ignoring second region\n"); return (1); } } /* See if we can prepend to the next entry. 
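 *
 * For example, with physmap[] = { 0x40000000, 0x80000000 }, adding a
 * region at base 0x3c000000 with length 0x4000000 ends exactly at
 * 0x40000000, so the existing entry is extended downward in place
 * rather than a new pair being inserted (a hypothetical illustration,
 * not values from the commit).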
*/ if (insert_idx <= _physmap_idx && base + length == physmap[insert_idx]) { physmap[insert_idx] = base; return (1); } /* See if we can append to the previous entry. */ if (insert_idx > 0 && base == physmap[insert_idx - 1]) { physmap[insert_idx - 1] += length; return (1); } _physmap_idx += 2; *physmap_idxp = _physmap_idx; if (_physmap_idx == PHYSMAP_SIZE) { printf( "Too many segments in the physical address map, giving up\n"); return (0); } /* * Move the last 'N' entries down to make room for the new * entry if needed. */ for (i = _physmap_idx; i > insert_idx; i -= 2) { physmap[i] = physmap[i - 2]; physmap[i + 1] = physmap[i - 1]; } /* Insert the new entry. */ physmap[insert_idx] = base; physmap[insert_idx + 1] = base + length; return (1); } #ifdef FDT static void add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap, u_int *physmap_idxp) { for (int i = 0; i < mrcnt; i++) { if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap, physmap_idxp)) break; } } #endif static void add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap, u_int *physmap_idxp) { struct efi_md *map, *p; const char *type; size_t efisz; int ndesc, i; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode", "PersistentMemory" }; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; if (boothowto & RB_VERBOSE) printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { if (boothowto & RB_VERBOSE) { if (p->md_type < nitems(types)) type = types[p->md_type]; else type = ""; printf("%23s %012lx %12p %08lx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_NV) printf("NV "); if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE) printf("MORE_RELIABLE "); if (p->md_attr & EFI_MD_ATTR_RO) printf("RO "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. 
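 *
 * (Note the walk below advances by efihdr->descriptor_size rather than
 * by sizeof(struct efi_md): efi_next_descriptor(p, size) is essentially
 * (struct efi_md *)((uint8_t *)(p) + (size)), since firmware may pack
 * its descriptors with a larger stride than the C structure.)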
*/ break; default: continue; } if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE), physmap, physmap_idxp)) break; } } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static bool bus_probe(void) { bool has_acpi, has_fdt; char *order, *env; has_acpi = has_fdt = false; #ifdef FDT has_fdt = (OF_peer(0) != 0); #endif #ifdef DEV_ACPI has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0); #endif env = kern_getenv("kern.cfg.order"); if (env != NULL) { order = env; while (order != NULL) { if (has_acpi && strncmp(order, "acpi", 4) == 0 && (order[4] == ',' || order[4] == '\0')) { arm64_bus_method = ARM64_BUS_ACPI; break; } if (has_fdt && strncmp(order, "fdt", 3) == 0 && (order[3] == ',' || order[3] == '\0')) { arm64_bus_method = ARM64_BUS_FDT; break; } order = strchr(order, ','); } freeenv(env); /* If we set the bus method it is valid */ if (arm64_bus_method != ARM64_BUS_NONE) return (true); } /* If no order or an invalid order was set use the default */ if (arm64_bus_method == ARM64_BUS_NONE) { if (has_fdt) arm64_bus_method = ARM64_BUS_FDT; else if (has_acpi) arm64_bus_method = ARM64_BUS_ACPI; } /* * If no option was set the default is valid, otherwise we are * setting one to get cninit() working, then calling panic to tell * the user about the invalid bus setup. */ return (env == NULL); } static void cache_setup(void) { int dcache_line_shift, icache_line_shift, dczva_line_shift; uint32_t ctr_el0; uint32_t dczid_el0; ctr_el0 = READ_SPECIALREG(ctr_el0); /* Read the log2 words in each D cache line */ dcache_line_shift = CTR_DLINE_SIZE(ctr_el0); /* Get the D cache line size */ dcache_line_size = sizeof(int) << dcache_line_shift; /* And the same for the I cache */ icache_line_shift = CTR_ILINE_SIZE(ctr_el0); icache_line_size = sizeof(int) << icache_line_shift; idcache_line_size = MIN(dcache_line_size, icache_line_size); dczid_el0 = READ_SPECIALREG(dczid_el0); /* Check if dc zva is not prohibited */ if (dczid_el0 & DCZID_DZP) dczva_line_size = 0; else { /* Same as with above calculations */ dczva_line_shift = DCZID_BS_SIZE(dczid_el0); dczva_line_size = sizeof(int) << dczva_line_shift; /* Change pagezero function */ pagezero = pagezero_cache; } } void initarm(struct arm64_bootparams *abp) { struct efi_map_header *efihdr; struct pcpu *pcpup; #ifdef FDT struct mem_region mem_regions[FDT_MEM_REGIONS]; int mem_regions_sz; #endif vm_offset_t lastaddr; caddr_t kmdp; vm_paddr_t mem_len; bool valid; int i; /* Set the module data location */ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0); #ifdef FDT try_load_dtb(kmdp); #endif /* Find the address to start allocating from */ lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); /* Load the physical memory ranges */ physmap_idx = 0; efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) add_efi_map_entries(efihdr, physmap, &physmap_idx); #ifdef FDT else { /* Grab physical memory regions information from device tree. 
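 * For instance, a node such as
 *
 *	memory@40000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x40000000 0x0 0x80000000>;
 *	};
 *
 * comes back from fdt_get_mem_regions() as a single mem_region with
 * mr_start 0x40000000 and mr_size 0x80000000 (a hypothetical example
 * assuming two-cell address and size fields).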
*/ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) panic("Cannot get physical memory regions"); add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap, &physmap_idx); } #endif /* Print the memory map */ mem_len = 0; for (i = 0; i < physmap_idx; i += 2) { dump_avail[i] = physmap[i]; dump_avail[i + 1] = physmap[i + 1]; mem_len += physmap[i + 1] - physmap[i]; } dump_avail[i] = 0; dump_avail[i + 1] = 0; /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); cache_setup(); + pan_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt, KERNBASE - abp->kern_delta, lastaddr - KERNBASE); devmap_bootstrap(0, NULL); valid = bus_probe(); cninit(); if (!valid) panic("Invalid bus configuration: %s", kern_getenv("kern.cfg.order")); init_proc0(abp->kern_stack); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); dbg_monitor_init(); kdb_init(); + pan_enable(); early_boot = 0; } #ifdef DDB #include DB_SHOW_COMMAND(specialregs, db_show_spregs) { #define PRINT_REG(reg) \ db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg)) PRINT_REG(actlr_el1); PRINT_REG(afsr0_el1); PRINT_REG(afsr1_el1); PRINT_REG(aidr_el1); PRINT_REG(amair_el1); PRINT_REG(ccsidr_el1); PRINT_REG(clidr_el1); PRINT_REG(contextidr_el1); PRINT_REG(cpacr_el1); PRINT_REG(csselr_el1); PRINT_REG(ctr_el0); PRINT_REG(currentel); PRINT_REG(daif); PRINT_REG(dczid_el0); PRINT_REG(elr_el1); PRINT_REG(esr_el1); PRINT_REG(far_el1); #if 0 /* ARM64TODO: Enable VFP before reading floating-point registers */ PRINT_REG(fpcr); PRINT_REG(fpsr); #endif PRINT_REG(id_aa64afr0_el1); PRINT_REG(id_aa64afr1_el1); PRINT_REG(id_aa64dfr0_el1); PRINT_REG(id_aa64dfr1_el1); PRINT_REG(id_aa64isar0_el1); PRINT_REG(id_aa64isar1_el1); PRINT_REG(id_aa64pfr0_el1); PRINT_REG(id_aa64pfr1_el1); PRINT_REG(id_afr0_el1); PRINT_REG(id_dfr0_el1); PRINT_REG(id_isar0_el1); PRINT_REG(id_isar1_el1); PRINT_REG(id_isar2_el1); PRINT_REG(id_isar3_el1); PRINT_REG(id_isar4_el1); PRINT_REG(id_isar5_el1); PRINT_REG(id_mmfr0_el1); PRINT_REG(id_mmfr1_el1); PRINT_REG(id_mmfr2_el1); PRINT_REG(id_mmfr3_el1); #if 0 /* Missing from llvm */ PRINT_REG(id_mmfr4_el1); #endif PRINT_REG(id_pfr0_el1); PRINT_REG(id_pfr1_el1); PRINT_REG(isr_el1); PRINT_REG(mair_el1); PRINT_REG(midr_el1); PRINT_REG(mpidr_el1); PRINT_REG(mvfr0_el1); PRINT_REG(mvfr1_el1); PRINT_REG(mvfr2_el1); PRINT_REG(revidr_el1); PRINT_REG(sctlr_el1); PRINT_REG(sp_el0); PRINT_REG(spsel); PRINT_REG(spsr_el1); PRINT_REG(tcr_el1); PRINT_REG(tpidr_el0); PRINT_REG(tpidr_el1); PRINT_REG(tpidrro_el0); PRINT_REG(ttbr0_el1); PRINT_REG(ttbr1_el1); PRINT_REG(vbar_el1); #undef PRINT_REG } DB_SHOW_COMMAND(vtop, db_show_vtop) { uint64_t phys; if (have_addr) { phys = arm64_address_translate_s1e1r(addr); db_printf("Physical address reg (read): 0x%016lx\n", phys); phys = arm64_address_translate_s1e1w(addr); db_printf("Physical address reg (write): 0x%016lx\n", phys); } else db_printf("show vtop \n"); } #endif Index: head/sys/arm64/arm64/mp_machdep.c =================================================================== --- head/sys/arm64/arm64/mp_machdep.c (revision 316755) +++ head/sys/arm64/arm64/mp_machdep.c (revision 316756) @@ -1,837 +1,838 @@ /*- * 
Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_acpi.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif #include #include "pic_if.h" typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t * ii_handler; void * ii_handler_arg; intr_ipi_send_t * ii_send; void * ii_send_arg; char ii_name[INTR_IPI_NAMELEN]; u_long * ii_count; }; static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; static struct intr_ipi *intr_ipi_lookup(u_int); static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); extern struct pcpu __pcpu[]; static device_identify_t arm64_cpu_identify; static device_probe_t arm64_cpu_probe; static device_attach_t arm64_cpu_attach; static void ipi_ast(void *); static void ipi_hardclock(void *); static void ipi_preempt(void *); static void ipi_rendezvous(void *); static void ipi_stop(void *); struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; static device_t cpu_list[MAXCPU]; /* * Not all systems boot from the first CPU in the device tree. To work around * this we need to find which CPU we have booted from so when we later * enable the secondary CPUs we skip this one. */ static int cpu0 = -1; void mpentry(unsigned long cpuid); void init_secondary(uint64_t); uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16); /* Set to 1 once we're ready to let the APs out of the pen. 
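 * The release is a two-sided handshake: init_secondary() parks each AP
 * in a wfe loop polling this flag, while release_aps() stores 1 with
 * release semantics and then executes sev to wake any core still
 * sleeping in wfe.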
*/ volatile int aps_ready = 0; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static device_method_t arm64_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, arm64_cpu_identify), DEVMETHOD(device_probe, arm64_cpu_probe), DEVMETHOD(device_attach, arm64_cpu_attach), DEVMETHOD_END }; static devclass_t arm64_cpu_devclass; static driver_t arm64_cpu_driver = { "arm64_cpu", arm64_cpu_methods, 0 }; DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0); static void arm64_cpu_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "arm64_cpu", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL) device_printf(parent, "add child failed\n"); } static int arm64_cpu_probe(device_t dev) { u_int cpuid; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); device_quiet(dev); return (0); } static int arm64_cpu_attach(device_t dev) { const uint32_t *reg; size_t reg_size; u_int cpuid; int i; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); reg = cpu_get_cpuid(dev, ®_size); if (reg == NULL) return (EINVAL); if (bootverbose) { device_printf(dev, "register <"); for (i = 0; i < reg_size; i++) printf("%s%x", (i == 0) ? "" : " ", reg[i]); printf(">\n"); } /* Set the device to start it later */ cpu_list[cpuid] = dev; return (0); } static void release_aps(void *dummy __unused) { int i; /* Only release CPUs if they exist */ if (mp_ncpus == 1) return; intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL); intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ __asm __volatile("sev"); printf("Release APs\n"); for (i = 0; i < 2000; i++) { if (smp_started) return; DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t cpu) { struct pcpu *pcpup; pcpup = &__pcpu[cpu]; /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); /* Spin until the BSP releases the APs */ while (!aps_ready) __asm __volatile("wfe"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb; /* * Identify current CPU. This is necessary to setup * affinity registers and to provide support for * runtime chip identification. */ identify_cpu(); intr_pic_init_secondary(); /* Start per-CPU event timers. */ cpu_initclocks_ap(); #ifdef VFP vfp_init(); #endif dbg_monitor_init(); + pan_enable(); /* Enable interrupts */ intr_enable(); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } /* * Send IPI thru interrupt controller. 
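 * The send path layers roughly as:
 *
 *	ipi_cpu(cpu, ipi)
 *	    -> intr_ipi_send(cpus, ipi)
 *	        -> ii->ii_send(ii->ii_send_arg, cpus, ipi)	[pic_ipi_send]
 *	            -> PIC_IPI_SEND(intr_irq_root_dev, isrc, cpus, ipi)
 *
 * with ii_send and ii_send_arg bound earlier by intr_pic_ipi_setup().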
*/ static void pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) { KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. */ static void intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { struct intr_irqsrc *isrc; struct intr_ipi *ii; int error; KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); if (error != 0) return; isrc->isrc_handlers++; ii = intr_ipi_lookup(ipi); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = arg; ii->ii_send = pic_ipi_send; ii->ii_send_arg = isrc; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); } static void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); ii->ii_send(ii->ii_send_arg, cpus, ipi); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_hardclock(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); } static void ipi_preempt(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); } static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_stop(void *dummy __unused) { u_int cpu; CTR0(KTR_SMP, "IPI_STOP"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); } struct cpu_group * cpu_topo(void) { return (smp_topo_none()); } /* Determine if we running MP machine */ int cpu_mp_probe(void) { /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */ return (1); } static bool start_cpu(u_int id, uint64_t target_cpu) { struct pcpu *pcpup; vm_paddr_t pa; u_int cpuid; int err; /* Check we are able to start this cpu */ if (id > mp_maxid) return (false); KASSERT(id < MAXCPU, ("Too many CPUs")); /* We are already running on cpu 0 */ if (id == cpu0) return (true); /* * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other * CPUs ordered as the are likely grouped into clusters so it can be * useful to keep that property, e.g. for the GICv3 driver to send * an IPI to all CPUs in the cluster. */ cpuid = id; if (cpuid < cpu0) cpuid += mp_maxid + 1; cpuid -= cpu0; pcpup = &__pcpu[cpuid]; pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dpcpu_init(dpcpu[cpuid - 1], cpuid); printf("Starting CPU %u (%lx)\n", cpuid, target_cpu); pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry); err = psci_cpu_on(target_cpu, pa, cpuid); if (err != PSCI_RETVAL_SUCCESS) { /* * Panic here if INVARIANTS are enabled and PSCI failed to * start the requested CPU. If psci_cpu_on returns PSCI_MISSING * to indicate we are unable to use it to start the given CPU. 
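 *
 * (On the cpuid rotation above: with cpu0 = 2 and mp_maxid = 3, ids
 * 2, 3, 0, 1 map to cpuids 0, 1, 2, 3, so the boot CPU always becomes
 * cpuid 0 while the relative cluster ordering is preserved.)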
*/ KASSERT(err == PSCI_MISSING, ("Failed to start CPU %u (%lx)\n", id, target_cpu)); pcpu_destroy(pcpup); kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE); dpcpu[cpuid - 1] = NULL; /* Notify the user that the CPU failed to start */ printf("Failed to start CPU %u (%lx)\n", id, target_cpu); } else CPU_SET(cpuid, &all_cpus); return (true); } #ifdef DEV_ACPI static void madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { ACPI_MADT_GENERIC_INTERRUPT *intr; u_int *cpuid; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; cpuid = arg; start_cpu((*cpuid), intr->ArmMpidr); (*cpuid)++; break; default: break; } } static void cpu_init_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cpuid; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return; madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return; } cpuid = 0; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, madt_handler, &cpuid); acpi_unmap_table(madt); } #endif #ifdef FDT static boolean_t cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t target_cpu; target_cpu = reg[0]; if (addr_size == 2) { target_cpu <<= 32; target_cpu |= reg[1]; } return (start_cpu(id, target_cpu) ? TRUE : FALSE); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_SET(0, &all_cpus); switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: KASSERT(cpu0 >= 0, ("Current CPU was not found")); cpu_init_acpi(); break; #endif #ifdef FDT case ARM64_BUS_FDT: KASSERT(cpu0 >= 0, ("Current CPU was not found")); ofw_cpu_early_foreach(cpu_init_fdt, true); break; #endif default: break; } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } #ifdef DEV_ACPI static void cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { ACPI_MADT_GENERIC_INTERRUPT *intr; u_int *cores = arg; uint64_t mpidr_reg; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; if (cpu0 < 0) { mpidr_reg = READ_SPECIALREG(mpidr_el1); if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr) cpu0 = *cores; } (*cores)++; break; default: break; } } static u_int cpu_count_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cores; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return (0); madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return (0); } cores = 0; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, cpu_count_acpi_handler, &cores); acpi_unmap_table(madt); return (cores); } #endif #ifdef FDT static boolean_t cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t mpidr_fdt, mpidr_reg; if (cpu0 < 0) { mpidr_fdt = reg[0]; if (addr_size == 2) { mpidr_fdt <<= 32; mpidr_fdt |= reg[1]; } mpidr_reg = READ_SPECIALREG(mpidr_el1); if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt) cpu0 = id; } return (TRUE); } #endif void cpu_mp_setmaxid(void) { #if defined(DEV_ACPI) || defined(FDT) int cores; #endif switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: cores = cpu_count_acpi(); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the ACPI tables\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; return; } break; #endif #ifdef FDT case ARM64_BUS_FDT: 
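		/*
		 * Walk the device tree's cpu nodes to count them and find
		 * the boot CPU. The boolean argument is false here, unlike
		 * in cpu_mp_start(), so that every node is visited and
		 * counted (assuming it selects ofw_cpu_early_foreach()'s
		 * "runnable only" filtering).
		 */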
cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the device tree\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; return; } break; #endif default: break; } if (bootverbose) printf("No CPU data, limiting to 1 core\n"); mp_ncpus = 1; mp_maxid = 0; } /* * Lookup IPI source. */ static struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } /* * interrupt controller dispatch function for IPIs. It should * be called straight from the interrupt controller, when associated * interrupt source is learned. Or from anybody who has an interrupt * source mapped. */ void intr_ipi_dispatch(u_int ipi, struct trapframe *tf) { void *arg; struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); /* * Supply ipi filter with trapframe argument * if none is registered. */ arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf; ii->ii_handler(arg); } #ifdef notyet /* * Map IPI into interrupt controller. * * Not SMP coherent. */ static int ipi_map(struct intr_irqsrc *isrc, u_int ipi) { boolean_t is_percpu; int error; if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); isrc->isrc_type = INTR_ISRCT_NAMESPACE; isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; isrc->isrc_nspc_num = ipi_next_num; error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu); if (error == 0) { isrc->isrc_dev = intr_irq_root_dev; ipi_next_num++; } return (error); } /* * Setup IPI handler to interrupt source. * * Note that there could be more ways how to send and receive IPIs * on a platform like fast interrupts for example. In that case, * one can call this function with ASIF_NOALLOC flag set and then * call intr_ipi_dispatch() when appropriate. * * Not SMP coherent. */ int intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, void *arg, u_int flags) { struct intr_irqsrc *isrc; int error; if (filter == NULL) return(EINVAL); isrc = intr_ipi_lookup(ipi); if (isrc->isrc_ipifilter != NULL) return (EEXIST); if ((flags & AISHF_NOALLOC) == 0) { error = ipi_map(isrc, ipi); if (error != 0) return (error); } isrc->isrc_ipifilter = filter; isrc->isrc_arg = arg; isrc->isrc_handlers = 1; isrc->isrc_count = intr_ipi_setup_counters(name); isrc->isrc_index = 0; /* it should not be used in IPI case */ if (isrc->isrc_dev != NULL) { PIC_ENABLE_INTR(isrc->isrc_dev, isrc); PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); } return (0); } #endif /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t cpus; cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); intr_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } Index: head/sys/arm64/arm64/support.S =================================================================== --- head/sys/arm64/arm64/support.S (revision 316755) +++ head/sys/arm64/arm64/support.S (revision 316756) @@ -1,329 +1,335 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014-2015 The FreeBSD Foundation * All rights reserved. 
* * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "assym.s" /* * One of the fu* or su* functions failed, return -1. */ ENTRY(fsu_fault) SET_FAULT_HANDLER(xzr, x1) /* Reset the handler function */ + EXIT_USER_ACCESS_CHECK(w0, x1) fsu_fault_nopcb: mov x0, #-1 ret END(fsu_fault) /* * int casueword32(volatile uint32_t *, uint32_t, uint32_t *, uint32_t) */ ENTRY(casueword32) ldr x4, =(VM_MAXUSER_ADDRESS-3) cmp x0, x4 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ + ENTER_USER_ACCESS(w6, x4) 1: ldxr w4, [x0] /* Load-exclusive the data */ cmp w4, w1 /* Compare */ b.ne 2f /* Not equal, exit */ stxr w5, w3, [x0] /* Store the new data */ cbnz w5, 1b /* Retry on failure */ + EXIT_USER_ACCESS(w6) 2: SET_FAULT_HANDLER(xzr, x5) /* Reset the fault handler */ str w4, [x2] /* Store the read data */ mov x0, #0 /* Success */ ret /* Return */ END(casueword32) /* * int casueword(volatile u_long *, u_long, u_long *, u_long) */ ENTRY(casueword) ldr x4, =(VM_MAXUSER_ADDRESS-7) cmp x0, x4 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ + ENTER_USER_ACCESS(w6, x4) 1: ldxr x4, [x0] /* Load-exclusive the data */ cmp x4, x1 /* Compare */ b.ne 2f /* Not equal, exit */ stxr w5, x3, [x0] /* Store the new data */ cbnz w5, 1b /* Retry on failure */ + EXIT_USER_ACCESS(w6) 2: SET_FAULT_HANDLER(xzr, x5) /* Reset the fault handler */ str x4, [x2] /* Store the read data */ mov x0, #0 /* Success */ ret /* Return */ END(casueword) /* * int fubyte(volatile const void *) */ ENTRY(fubyte) ldr x1, =VM_MAXUSER_ADDRESS cmp x0, x1 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x1) /* And set it */ ldtrb w0, [x0] /* Try loading the data */ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */ ret /* Return */ END(fubyte) /* * int fuword(volatile const void *) */ ENTRY(fuword16) ldr x1, =(VM_MAXUSER_ADDRESS-1) cmp x0, x1 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x1) /* And set it */ ldtrh w0, [x0] /* Try loading the data */ SET_FAULT_HANDLER(xzr, x1) /* Reset 
the fault handler */ ret /* Return */ END(fuword16) /* * int32_t fueword32(volatile const void *, int32_t *) */ ENTRY(fueword32) ldr x2, =(VM_MAXUSER_ADDRESS-3) cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ ldtr w0, [x0] /* Try loading the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ str w0, [x1] /* Save the data in kernel space */ mov w0, #0 /* Success */ ret /* Return */ END(fueword32) /* * long fueword(volatile const void *, int64_t *) * int64_t fueword64(volatile const void *, int64_t *) */ ENTRY(fueword) EENTRY(fueword64) ldr x2, =(VM_MAXUSER_ADDRESS-7) cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ ldtr x0, [x0] /* Try loading the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ str x0, [x1] /* Save the data in kernel space */ mov x0, #0 /* Success */ ret /* Return */ EEND(fueword64) END(fueword) /* * int subyte(volatile void *, int) */ ENTRY(subyte) ldr x2, =VM_MAXUSER_ADDRESS cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ sttrb w1, [x0] /* Try storing the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ mov x0, #0 /* Success */ ret /* Return */ END(subyte) /* * int suword16(volatile void *, int) */ ENTRY(suword16) ldr x2, =(VM_MAXUSER_ADDRESS-1) cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ sttrh w1, [x0] /* Try storing the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ mov x0, #0 /* Success */ ret /* Return */ END(suword16) /* * int suword32(volatile void *, int) */ ENTRY(suword32) ldr x2, =(VM_MAXUSER_ADDRESS-3) cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ sttr w1, [x0] /* Try storing the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ mov x0, #0 /* Success */ ret /* Return */ END(suword32) /* * int suword(volatile void *, long) */ ENTRY(suword) EENTRY(suword64) ldr x2, =(VM_MAXUSER_ADDRESS-7) cmp x0, x2 b.cs fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x2) /* And set it */ sttr x1, [x0] /* Try storing the data */ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */ mov x0, #0 /* Success */ ret /* Return */ EEND(suword64) END(suword) /* * fuswintr and suswintr are just like fusword and susword except that if * the page is not in memory or would cause a trap, then we return an error. * The important thing is to prevent sleep() and switch(). */ /* * Special handler so the trap code knows not to sleep. 
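 *
 * (For context: SET_FAULT_HANDLER stores its first argument into the
 * current thread's pcb_onfault slot, roughly
 *
 *	curthread->td_pcb->pcb_onfault = handler;
 *
 * and the data-abort path, on a faulting user access with pcb_onfault
 * set, resumes at that handler instead of panicking. Passing xzr clears
 * it again -- a paraphrase of the macro in machine/asm.h.)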
 */
ENTRY(fsu_intr_fault)
	SET_FAULT_HANDLER(xzr, x1)	/* Reset the handler function */
+	EXIT_USER_ACCESS_CHECK(w0, x1)
	mov	x0, #-1
	ret
END(fsu_intr_fault)

/*
 * int fuswintr(void *)
 */
ENTRY(fuswintr)
	ldr	x1, =(VM_MAXUSER_ADDRESS-3)
	cmp	x0, x1
	b.cs	fsu_fault_nopcb
	adr	x6, fsu_intr_fault	/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x1)	/* And set it */
	ldtr	w0, [x0]		/* Try loading the data */
	SET_FAULT_HANDLER(xzr, x1)	/* Reset the fault handler */
	ret				/* Return */
END(fuswintr)

/*
 * int suswintr(void *base, int word)
 */
ENTRY(suswintr)
	ldr	x2, =(VM_MAXUSER_ADDRESS-3)
	cmp	x0, x2
	b.cs	fsu_fault_nopcb
	adr	x6, fsu_intr_fault	/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x2)	/* And set it */
	sttr	w1, [x0]		/* Try storing the data */
	SET_FAULT_HANDLER(xzr, x2)	/* Reset the fault handler */
	mov	x0, #0			/* Success */
	ret				/* Return */
END(suswintr)

ENTRY(setjmp)
	/* Store the stack pointer */
	mov	x8, sp
	str	x8, [x0], #8

	/* Store the general purpose registers and lr */
	stp	x19, x20, [x0], #16
	stp	x21, x22, [x0], #16
	stp	x23, x24, [x0], #16
	stp	x25, x26, [x0], #16
	stp	x27, x28, [x0], #16
	stp	x29, lr, [x0], #16

	/* Return value */
	mov	x0, #0
	ret
END(setjmp)

ENTRY(longjmp)
	/* Restore the stack pointer */
	ldr	x8, [x0], #8
	mov	sp, x8

	/* Restore the general purpose registers and lr */
	ldp	x19, x20, [x0], #16
	ldp	x21, x22, [x0], #16
	ldp	x23, x24, [x0], #16
	ldp	x25, x26, [x0], #16
	ldp	x27, x28, [x0], #16
	ldp	x29, lr, [x0], #16

	/* Load the return value */
	mov	x0, x1
	ret
END(longjmp)

/*
 * pagezero, simple implementation
 */
ENTRY(pagezero_simple)
	add	x1, x0, #PAGE_SIZE
1:
	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	cmp	x0, x1
	b.ne	1b
	ret
END(pagezero_simple)

/*
 * pagezero, cache assisted
 */
ENTRY(pagezero_cache)
	add	x1, x0, #PAGE_SIZE

	ldr	x2, =dczva_line_size
	ldr	x2, [x2]

1:
	dc	zva, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.ne	1b
	ret
END(pagezero_cache)

Index: head/sys/arm64/include/armreg.h
===================================================================
--- head/sys/arm64/include/armreg.h	(revision 316755)
+++ head/sys/arm64/include/armreg.h	(revision 316756)
@@ -1,512 +1,513 @@
/*-
 * Copyright (c) 2013, 2014 Andrew Turner
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
Index: head/sys/arm64/include/armreg.h
===================================================================
--- head/sys/arm64/include/armreg.h	(revision 316755)
+++ head/sys/arm64/include/armreg.h	(revision 316756)
@@ -1,512 +1,513 @@
/*-
 * Copyright (c) 2013, 2014 Andrew Turner
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ARMREG_H_
#define	_MACHINE_ARMREG_H_

#define	INSN_SIZE	4

#define	READ_SPECIALREG(reg)						\
({	uint64_t val;							\
	__asm __volatile("mrs	%0, " __STRING(reg) : "=&r" (val));	\
	val;								\
})
#define	WRITE_SPECIALREG(reg, val)					\
	__asm __volatile("msr	" __STRING(reg) ", %0" : : "r"((uint64_t)val))

/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
#define	CNTHCTL_EVNTI_MASK	(0xf << 4) /* Bit to trigger event stream */
#define	CNTHCTL_EVNTDIR		(1 << 3) /* Control transition trigger bit */
#define	CNTHCTL_EVNTEN		(1 << 2) /* Enable event stream */
#define	CNTHCTL_EL1PCEN		(1 << 1) /* Allow EL0/1 physical timer access */
#define	CNTHCTL_EL1PCTEN	(1 << 0) /* Allow EL0/1 physical counter access */

/* CPACR_EL1 */
#define	CPACR_FPEN_MASK		(0x3 << 20)
#define	CPACR_FPEN_TRAP_ALL1	(0x0 << 20) /* Traps from EL0 and EL1 */
#define	CPACR_FPEN_TRAP_EL0	(0x1 << 20) /* Traps from EL0 */
#define	CPACR_FPEN_TRAP_ALL2	(0x2 << 20) /* Traps from EL0 and EL1 */
#define	CPACR_FPEN_TRAP_NONE	(0x3 << 20) /* No traps */
#define	CPACR_TTA		(0x1 << 28)

/* CTR_EL0 - Cache Type Register */
#define	CTR_DLINE_SHIFT		16
#define	CTR_DLINE_MASK		(0xf << CTR_DLINE_SHIFT)
#define	CTR_DLINE_SIZE(reg)	(((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT)
#define	CTR_ILINE_SHIFT		0
#define	CTR_ILINE_MASK		(0xf << CTR_ILINE_SHIFT)
#define	CTR_ILINE_SIZE(reg)	(((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT)

/* DCZID_EL0 - Data Cache Zero ID register */
#define	DCZID_DZP		(1 << 4) /* DC ZVA prohibited if non-0 */
#define	DCZID_BS_SHIFT		0
#define	DCZID_BS_MASK		(0xf << DCZID_BS_SHIFT)
#define	DCZID_BS_SIZE(reg)	(((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT)
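A small sketch (using only the macros defined above) of reading CTR_EL0 and
converting its DminLine field to bytes; the function name is illustrative.

#include <sys/types.h>
#include <machine/armreg.h>

static __inline u_int
dcache_min_line_size(void)
{
	uint64_t ctr;

	ctr = READ_SPECIALREG(ctr_el0);
	/* CTR_EL0.DminLine is log2 of the line size in 4-byte words. */
	return (4U << CTR_DLINE_SIZE(ctr));
}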
/* ESR_ELx */
#define	ESR_ELx_ISS_MASK	0x00ffffff
#define	ISS_INSN_FnV		(0x01 << 10)
#define	ISS_INSN_EA		(0x01 << 9)
#define	ISS_INSN_S1PTW		(0x01 << 7)
#define	ISS_INSN_IFSC_MASK	(0x1f << 0)
#define	ISS_DATA_ISV		(0x01 << 24)
#define	ISS_DATA_SAS_MASK	(0x03 << 22)
#define	ISS_DATA_SSE		(0x01 << 21)
#define	ISS_DATA_SRT_MASK	(0x1f << 16)
#define	ISS_DATA_SF		(0x01 << 15)
#define	ISS_DATA_AR		(0x01 << 14)
#define	ISS_DATA_FnV		(0x01 << 10)
#define	ISS_DATa_EA		(0x01 << 9)
#define	ISS_DATa_CM		(0x01 << 8)
#define	ISS_INSN_S1PTW		(0x01 << 7)
#define	ISS_DATa_WnR		(0x01 << 6)
#define	ISS_DATA_DFSC_MASK	(0x3f << 0)
#define	ISS_DATA_DFSC_ASF_L0	(0x00 << 0)
#define	ISS_DATA_DFSC_ASF_L1	(0x01 << 0)
#define	ISS_DATA_DFSC_ASF_L2	(0x02 << 0)
#define	ISS_DATA_DFSC_ASF_L3	(0x03 << 0)
#define	ISS_DATA_DFSC_TF_L0	(0x04 << 0)
#define	ISS_DATA_DFSC_TF_L1	(0x05 << 0)
#define	ISS_DATA_DFSC_TF_L2	(0x06 << 0)
#define	ISS_DATA_DFSC_TF_L3	(0x07 << 0)
#define	ISS_DATA_DFSC_AFF_L1	(0x09 << 0)
#define	ISS_DATA_DFSC_AFF_L2	(0x0a << 0)
#define	ISS_DATA_DFSC_AFF_L3	(0x0b << 0)
#define	ISS_DATA_DFSC_PF_L1	(0x0d << 0)
#define	ISS_DATA_DFSC_PF_L2	(0x0e << 0)
#define	ISS_DATA_DFSC_PF_L3	(0x0f << 0)
#define	ISS_DATA_DFSC_EXT	(0x10 << 0)
#define	ISS_DATA_DFSC_EXT_L0	(0x14 << 0)
#define	ISS_DATA_DFSC_EXT_L1	(0x15 << 0)
#define	ISS_DATA_DFSC_EXT_L2	(0x16 << 0)
#define	ISS_DATA_DFSC_EXT_L3	(0x17 << 0)
#define	ISS_DATA_DFSC_ECC	(0x18 << 0)
#define	ISS_DATA_DFSC_ECC_L0	(0x1c << 0)
#define	ISS_DATA_DFSC_ECC_L1	(0x1d << 0)
#define	ISS_DATA_DFSC_ECC_L2	(0x1e << 0)
#define	ISS_DATA_DFSC_ECC_L3	(0x1f << 0)
#define	ISS_DATA_DFSC_ALIGN	(0x21 << 0)
#define	ISS_DATA_DFSC_TLB_CONFLICT (0x30 << 0)
#define	ESR_ELx_IL		(0x01 << 25)
#define	ESR_ELx_EC_SHIFT	26
#define	ESR_ELx_EC_MASK		(0x3f << 26)
#define	ESR_ELx_EXCEPTION(esr)	(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define	EXCP_UNKNOWN		0x00	/* Unknown exception */
#define	EXCP_FP_SIMD		0x07	/* VFP/SIMD trap */
#define	EXCP_ILL_STATE		0x0e	/* Illegal execution state */
#define	EXCP_SVC		0x15	/* SVC trap */
#define	EXCP_MSR		0x18	/* MSR/MRS trap */
#define	EXCP_INSN_ABORT_L	0x20	/* Instruction abort, from lower EL */
#define	EXCP_INSN_ABORT		0x21	/* Instruction abort, from same EL */
#define	EXCP_PC_ALIGN		0x22	/* PC alignment fault */
#define	EXCP_DATA_ABORT_L	0x24	/* Data abort, from lower EL */
#define	EXCP_DATA_ABORT		0x25	/* Data abort, from same EL */
#define	EXCP_SP_ALIGN		0x26	/* SP alignment fault */
#define	EXCP_TRAP_FP		0x2c	/* Trapped FP exception */
#define	EXCP_SERROR		0x2f	/* SError interrupt */
#define	EXCP_SOFTSTP_EL0	0x32	/* Software Step, from lower EL */
#define	EXCP_SOFTSTP_EL1	0x33	/* Software Step, from same EL */
#define	EXCP_WATCHPT_EL1	0x35	/* Watchpoint, from same EL */
#define	EXCP_BRK		0x3c	/* Breakpoint */
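Sketch of how a trap handler might dispatch on the exception class using
ESR_ELx_EXCEPTION(); the handler shape is hypothetical and far simpler than
the real arm64 trap code.

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/armreg.h>

static void
handle_sync_excp(uint64_t esr)
{

	switch (ESR_ELx_EXCEPTION(esr)) {
	case EXCP_DATA_ABORT_L:		/* data abort from a lower EL */
	case EXCP_DATA_ABORT:		/* data abort from the current EL */
		/* The DFSC field (ISS_DATA_DFSC_MASK) refines the cause. */
		break;
	case EXCP_SVC:
		/* System call. */
		break;
	default:
		panic("unhandled exception class %#lx",
		    ESR_ELx_EXCEPTION(esr));
	}
}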
/* ICC_CTLR_EL1 */
#define	ICC_CTLR_EL1_EOIMODE	(1U << 1)

/* ICC_IAR1_EL1 */
#define	ICC_IAR1_EL1_SPUR	(0x03ff)

/* ICC_IGRPEN0_EL1 */
#define	ICC_IGRPEN0_EL1_EN	(1U << 0)

/* ICC_PMR_EL1 */
#define	ICC_PMR_EL1_PRIO_MASK	(0xFFUL)

/* ICC_SGI1R_EL1 */
#define	ICC_SGI1R_EL1_TL_MASK		0xffffUL
#define	ICC_SGI1R_EL1_AFF1_SHIFT	16
#define	ICC_SGI1R_EL1_SGIID_SHIFT	24
#define	ICC_SGI1R_EL1_AFF2_SHIFT	32
#define	ICC_SGI1R_EL1_AFF3_SHIFT	48
#define	ICC_SGI1R_EL1_SGIID_MASK	0xfUL
#define	ICC_SGI1R_EL1_IRM		(0x1UL << 40)

/* ICC_SRE_EL1 */
#define	ICC_SRE_EL1_SRE		(1U << 0)

/* ICC_SRE_EL2 */
#define	ICC_SRE_EL2_SRE		(1U << 0)
#define	ICC_SRE_EL2_EN		(1U << 3)

/* ID_AA64DFR0_EL1 */
#define	ID_AA64DFR0_MASK		0xf0f0ffff
#define	ID_AA64DFR0_DEBUG_VER_SHIFT	0
#define	ID_AA64DFR0_DEBUG_VER_MASK	(0xf << ID_AA64DFR0_DEBUG_VER_SHIFT)
#define	ID_AA64DFR0_DEBUG_VER(x)	((x) & ID_AA64DFR0_DEBUG_VER_MASK)
#define	ID_AA64DFR0_DEBUG_VER_8		(0x6 << ID_AA64DFR0_DEBUG_VER_SHIFT)
#define	ID_AA64DFR0_DEBUG_VER_8_VHE	(0x7 << ID_AA64DFR0_DEBUG_VER_SHIFT)
#define	ID_AA64DFR0_TRACE_VER_SHIFT	4
#define	ID_AA64DFR0_TRACE_VER_MASK	(0xf << ID_AA64DFR0_TRACE_VER_SHIFT)
#define	ID_AA64DFR0_TRACE_VER(x)	((x) & ID_AA64DFR0_TRACE_VER_MASK)
#define	ID_AA64DFR0_TRACE_VER_NONE	(0x0 << ID_AA64DFR0_TRACE_VER_SHIFT)
#define	ID_AA64DFR0_TRACE_VER_IMPL	(0x1 << ID_AA64DFR0_TRACE_VER_SHIFT)
#define	ID_AA64DFR0_PMU_VER_SHIFT	8
#define	ID_AA64DFR0_PMU_VER_MASK	(0xf << ID_AA64DFR0_PMU_VER_SHIFT)
#define	ID_AA64DFR0_PMU_VER(x)		((x) & ID_AA64DFR0_PMU_VER_MASK)
#define	ID_AA64DFR0_PMU_VER_NONE	(0x0 << ID_AA64DFR0_PMU_VER_SHIFT)
#define	ID_AA64DFR0_PMU_VER_3		(0x1 << ID_AA64DFR0_PMU_VER_SHIFT)
#define	ID_AA64DFR0_PMU_VER_3_1		(0x4 << ID_AA64DFR0_PMU_VER_SHIFT)
#define	ID_AA64DFR0_PMU_VER_IMPL	(0xf << ID_AA64DFR0_PMU_VER_SHIFT)
#define	ID_AA64DFR0_BRPS_SHIFT		12
#define	ID_AA64DFR0_BRPS_MASK		(0xf << ID_AA64DFR0_BRPS_SHIFT)
#define	ID_AA64DFR0_BRPS(x)		\
    ((((x) >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) + 1)
#define	ID_AA64DFR0_WRPS_SHIFT		20
#define	ID_AA64DFR0_WRPS_MASK		(0xf << ID_AA64DFR0_WRPS_SHIFT)
#define	ID_AA64DFR0_WRPS(x)		\
    ((((x) >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) + 1)
#define	ID_AA64DFR0_CTX_CMPS_SHIFT	28
#define	ID_AA64DFR0_CTX_CMPS_MASK	(0xf << ID_AA64DFR0_CTX_CMPS_SHIFT)
#define	ID_AA64DFR0_CTX_CMPS(x)		\
    ((((x) >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) + 1)

/* ID_AA64ISAR0_EL1 */
#define	ID_AA64ISAR0_MASK		0xf0fffff0
#define	ID_AA64ISAR0_AES_SHIFT		4
#define	ID_AA64ISAR0_AES_MASK		(0xf << ID_AA64ISAR0_AES_SHIFT)
#define	ID_AA64ISAR0_AES(x)		((x) & ID_AA64ISAR0_AES_MASK)
#define	ID_AA64ISAR0_AES_NONE		(0x0 << ID_AA64ISAR0_AES_SHIFT)
#define	ID_AA64ISAR0_AES_BASE		(0x1 << ID_AA64ISAR0_AES_SHIFT)
#define	ID_AA64ISAR0_AES_PMULL		(0x2 << ID_AA64ISAR0_AES_SHIFT)
#define	ID_AA64ISAR0_SHA1_SHIFT		8
#define	ID_AA64ISAR0_SHA1_MASK		(0xf << ID_AA64ISAR0_SHA1_SHIFT)
#define	ID_AA64ISAR0_SHA1(x)		((x) & ID_AA64ISAR0_SHA1_MASK)
#define	ID_AA64ISAR0_SHA1_NONE		(0x0 << ID_AA64ISAR0_SHA1_SHIFT)
#define	ID_AA64ISAR0_SHA1_BASE		(0x1 << ID_AA64ISAR0_SHA1_SHIFT)
#define	ID_AA64ISAR0_SHA2_SHIFT		12
#define	ID_AA64ISAR0_SHA2_MASK		(0xf << ID_AA64ISAR0_SHA2_SHIFT)
#define	ID_AA64ISAR0_SHA2(x)		((x) & ID_AA64ISAR0_SHA2_MASK)
#define	ID_AA64ISAR0_SHA2_NONE		(0x0 << ID_AA64ISAR0_SHA2_SHIFT)
#define	ID_AA64ISAR0_SHA2_BASE		(0x1 << ID_AA64ISAR0_SHA2_SHIFT)
#define	ID_AA64ISAR0_CRC32_SHIFT	16
#define	ID_AA64ISAR0_CRC32_MASK		(0xf << ID_AA64ISAR0_CRC32_SHIFT)
#define	ID_AA64ISAR0_CRC32(x)		((x) & ID_AA64ISAR0_CRC32_MASK)
#define	ID_AA64ISAR0_CRC32_NONE		(0x0 << ID_AA64ISAR0_CRC32_SHIFT)
#define	ID_AA64ISAR0_CRC32_BASE		(0x1 << ID_AA64ISAR0_CRC32_SHIFT)
#define	ID_AA64ISAR0_ATOMIC_SHIFT	20
#define	ID_AA64ISAR0_ATOMIC_MASK	(0xf << ID_AA64ISAR0_ATOMIC_SHIFT)
#define	ID_AA64ISAR0_ATOMIC(x)		((x) & ID_AA64ISAR0_ATOMIC_MASK)
#define	ID_AA64ISAR0_ATOMIC_NONE	(0x0 << ID_AA64ISAR0_ATOMIC_SHIFT)
#define	ID_AA64ISAR0_ATOMIC_IMPL	(0x2 << ID_AA64ISAR0_ATOMIC_SHIFT)
#define	ID_AA64ISAR0_RDM_SHIFT		28
#define	ID_AA64ISAR0_RDM_MASK		(0xf << ID_AA64ISAR0_RDM_SHIFT)
#define	ID_AA64ISAR0_RDM(x)		((x) & ID_AA64ISAR0_RDM_MASK)
#define	ID_AA64ISAR0_RDM_NONE		(0x0 << ID_AA64ISAR0_RDM_SHIFT)
#define	ID_AA64ISAR0_RDM_IMPL		(0x1 << ID_AA64ISAR0_RDM_SHIFT)
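Sketch of a feature probe built from the ID_AA64ISAR0_EL1 accessors above;
the function name is illustrative.

#include <sys/types.h>
#include <machine/armreg.h>

static __inline int
cpu_has_crc32(void)
{
	uint64_t isar0;

	isar0 = READ_SPECIALREG(id_aa64isar0_el1);
	return (ID_AA64ISAR0_CRC32(isar0) == ID_AA64ISAR0_CRC32_BASE);
}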
/* ID_AA64MMFR0_EL1 */
#define	ID_AA64MMFR0_MASK		0xffffffff
#define	ID_AA64MMFR0_PA_RANGE_SHIFT	0
#define	ID_AA64MMFR0_PA_RANGE_MASK	(0xf << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE(x)	((x) & ID_AA64MMFR0_PA_RANGE_MASK)
#define	ID_AA64MMFR0_PA_RANGE_4G	(0x0 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE_64G	(0x1 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE_1T	(0x2 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE_4T	(0x3 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE_16T	(0x4 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_PA_RANGE_256T	(0x5 << ID_AA64MMFR0_PA_RANGE_SHIFT)
#define	ID_AA64MMFR0_ASID_BITS_SHIFT	4
#define	ID_AA64MMFR0_ASID_BITS_MASK	(0xf << ID_AA64MMFR0_ASID_BITS_SHIFT)
#define	ID_AA64MMFR0_ASID_BITS(x)	((x) & ID_AA64MMFR0_ASID_BITS_MASK)
#define	ID_AA64MMFR0_ASID_BITS_8	(0x0 << ID_AA64MMFR0_ASID_BITS_SHIFT)
#define	ID_AA64MMFR0_ASID_BITS_16	(0x2 << ID_AA64MMFR0_ASID_BITS_SHIFT)
#define	ID_AA64MMFR0_BIGEND_SHIFT	8
#define	ID_AA64MMFR0_BIGEND_MASK	(0xf << ID_AA64MMFR0_BIGEND_SHIFT)
#define	ID_AA64MMFR0_BIGEND(x)		((x) & ID_AA64MMFR0_BIGEND_MASK)
#define	ID_AA64MMFR0_BIGEND_FIXED	(0x0 << ID_AA64MMFR0_BIGEND_SHIFT)
#define	ID_AA64MMFR0_BIGEND_MIXED	(0x1 << ID_AA64MMFR0_BIGEND_SHIFT)
#define	ID_AA64MMFR0_S_NS_MEM_SHIFT	12
#define	ID_AA64MMFR0_S_NS_MEM_MASK	(0xf << ID_AA64MMFR0_S_NS_MEM_SHIFT)
#define	ID_AA64MMFR0_S_NS_MEM(x)	((x) & ID_AA64MMFR0_S_NS_MEM_MASK)
#define	ID_AA64MMFR0_S_NS_MEM_NONE	(0x0 << ID_AA64MMFR0_S_NS_MEM_SHIFT)
#define	ID_AA64MMFR0_S_NS_MEM_DISTINCT	(0x1 << ID_AA64MMFR0_S_NS_MEM_SHIFT)
#define	ID_AA64MMFR0_BIGEND_EL0_SHIFT	16
#define	ID_AA64MMFR0_BIGEND_EL0_MASK	(0xf << ID_AA64MMFR0_BIGEND_EL0_SHIFT)
#define	ID_AA64MMFR0_BIGEND_EL0(x)	((x) & ID_AA64MMFR0_BIGEND_EL0_MASK)
#define	ID_AA64MMFR0_BIGEND_EL0_FIXED	(0x0 << ID_AA64MMFR0_BIGEND_EL0_SHIFT)
#define	ID_AA64MMFR0_BIGEND_EL0_MIXED	(0x1 << ID_AA64MMFR0_BIGEND_EL0_SHIFT)
#define	ID_AA64MMFR0_TGRAN16_SHIFT	20
#define	ID_AA64MMFR0_TGRAN16_MASK	(0xf << ID_AA64MMFR0_TGRAN16_SHIFT)
#define	ID_AA64MMFR0_TGRAN16(x)		((x) & ID_AA64MMFR0_TGRAN16_MASK)
#define	ID_AA64MMFR0_TGRAN16_NONE	(0x0 << ID_AA64MMFR0_TGRAN16_SHIFT)
#define	ID_AA64MMFR0_TGRAN16_IMPL	(0x1 << ID_AA64MMFR0_TGRAN16_SHIFT)
#define	ID_AA64MMFR0_TGRAN64_SHIFT	24
#define	ID_AA64MMFR0_TGRAN64_MASK	(0xf << ID_AA64MMFR0_TGRAN64_SHIFT)
#define	ID_AA64MMFR0_TGRAN64(x)		((x) & ID_AA64MMFR0_TGRAN64_MASK)
#define	ID_AA64MMFR0_TGRAN64_IMPL	(0x0 << ID_AA64MMFR0_TGRAN64_SHIFT)
#define	ID_AA64MMFR0_TGRAN64_NONE	(0xf << ID_AA64MMFR0_TGRAN64_SHIFT)
#define	ID_AA64MMFR0_TGRAN4_SHIFT	28
#define	ID_AA64MMFR0_TGRAN4_MASK	(0xf << ID_AA64MMFR0_TGRAN4_SHIFT)
#define	ID_AA64MMFR0_TGRAN4(x)		((x) & ID_AA64MMFR0_TGRAN4_MASK)
#define	ID_AA64MMFR0_TGRAN4_IMPL	(0x0 << ID_AA64MMFR0_TGRAN4_SHIFT)
#define	ID_AA64MMFR0_TGRAN4_NONE	(0xf << ID_AA64MMFR0_TGRAN4_SHIFT)

/* ID_AA64MMFR1_EL1 */
#define	ID_AA64MMFR1_MASK		0x00ffffff
#define	ID_AA64MMFR1_HAFDBS_SHIFT	0
#define	ID_AA64MMFR1_HAFDBS_MASK	(0xf << ID_AA64MMFR1_HAFDBS_SHIFT)
#define	ID_AA64MMFR1_HAFDBS(x)		((x) & ID_AA64MMFR1_HAFDBS_MASK)
#define	ID_AA64MMFR1_HAFDBS_NONE	(0x0 << ID_AA64MMFR1_HAFDBS_SHIFT)
#define	ID_AA64MMFR1_HAFDBS_AF		(0x1 << ID_AA64MMFR1_HAFDBS_SHIFT)
#define	ID_AA64MMFR1_HAFDBS_AF_DBS	(0x2 << ID_AA64MMFR1_HAFDBS_SHIFT)
#define	ID_AA64MMFR1_VMIDBITS_SHIFT	4
#define	ID_AA64MMFR1_VMIDBITS_MASK	(0xf << ID_AA64MMFR1_VMIDBITS_SHIFT)
#define	ID_AA64MMFR1_VMIDBITS(x)	((x) & ID_AA64MMFR1_VMIDBITS_MASK)
#define	ID_AA64MMFR1_VMIDBITS_8		(0x0 << ID_AA64MMFR1_VMIDBITS_SHIFT)
#define	ID_AA64MMFR1_VMIDBITS_16	(0x2 << ID_AA64MMFR1_VMIDBITS_SHIFT)
#define	ID_AA64MMFR1_VH_SHIFT		8
#define	ID_AA64MMFR1_VH_MASK		(0xf << ID_AA64MMFR1_VH_SHIFT)
#define	ID_AA64MMFR1_VH(x)		((x) & ID_AA64MMFR1_VH_MASK)
#define	ID_AA64MMFR1_VH_NONE		(0x0 << ID_AA64MMFR1_VH_SHIFT)
#define	ID_AA64MMFR1_VH_IMPL		(0x1 << ID_AA64MMFR1_VH_SHIFT)
#define	ID_AA64MMFR1_HPDS_SHIFT		12
#define	ID_AA64MMFR1_HPDS_MASK		(0xf << ID_AA64MMFR1_HPDS_SHIFT)
#define	ID_AA64MMFR1_HPDS(x)		((x) & ID_AA64MMFR1_HPDS_MASK)
#define	ID_AA64MMFR1_HPDS_NONE		(0x0 << ID_AA64MMFR1_HPDS_SHIFT)
#define	ID_AA64MMFR1_HPDS_IMPL		(0x1 << ID_AA64MMFR1_HPDS_SHIFT)
#define	ID_AA64MMFR1_LO_SHIFT		16
#define	ID_AA64MMFR1_LO_MASK		(0xf << ID_AA64MMFR1_LO_SHIFT)
#define	ID_AA64MMFR1_LO(x)		((x) & ID_AA64MMFR1_LO_MASK)
#define	ID_AA64MMFR1_LO_NONE		(0x0 << ID_AA64MMFR1_LO_SHIFT)
#define	ID_AA64MMFR1_LO_IMPL		(0x1 << ID_AA64MMFR1_LO_SHIFT)
#define	ID_AA64MMFR1_PAN_SHIFT		20
#define	ID_AA64MMFR1_PAN_MASK		(0xf << ID_AA64MMFR1_PAN_SHIFT)
#define	ID_AA64MMFR1_PAN(x)		((x) & ID_AA64MMFR1_PAN_MASK)
#define	ID_AA64MMFR1_PAN_NONE		(0x0 << ID_AA64MMFR1_PAN_SHIFT)
#define	ID_AA64MMFR1_PAN_IMPL		(0x1 << ID_AA64MMFR1_PAN_SHIFT)
+#define	ID_AA64MMFR1_PAN_ATS1E1		(0x2 << ID_AA64MMFR1_PAN_SHIFT)
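The PAN field above is what the new PAN support keys off: both the 0x1
encoding and the new 0x2 (PAN plus ATS1E1) encoding mean the feature is
present. A sketch of the kind of check involved; this is an assumed shape,
not the committed pan_enable() implementation.

#include <sys/types.h>
#include <machine/armreg.h>

static __inline int
cpu_has_pan(void)
{
	uint64_t mmfr1;

	mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
	return (ID_AA64MMFR1_PAN(mmfr1) != ID_AA64MMFR1_PAN_NONE);
}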
/* ID_AA64PFR0_EL1 */
#define	ID_AA64PFR0_MASK		0x0fffffff
#define	ID_AA64PFR0_EL0_SHIFT		0
#define	ID_AA64PFR0_EL0_MASK		(0xf << ID_AA64PFR0_EL0_SHIFT)
#define	ID_AA64PFR0_EL0(x)		((x) & ID_AA64PFR0_EL0_MASK)
#define	ID_AA64PFR0_EL0_64		(1 << ID_AA64PFR0_EL0_SHIFT)
#define	ID_AA64PFR0_EL0_64_32		(2 << ID_AA64PFR0_EL0_SHIFT)
#define	ID_AA64PFR0_EL1_SHIFT		4
#define	ID_AA64PFR0_EL1_MASK		(0xf << ID_AA64PFR0_EL1_SHIFT)
#define	ID_AA64PFR0_EL1(x)		((x) & ID_AA64PFR0_EL1_MASK)
#define	ID_AA64PFR0_EL1_64		(1 << ID_AA64PFR0_EL1_SHIFT)
#define	ID_AA64PFR0_EL1_64_32		(2 << ID_AA64PFR0_EL1_SHIFT)
#define	ID_AA64PFR0_EL2_SHIFT		8
#define	ID_AA64PFR0_EL2_MASK		(0xf << ID_AA64PFR0_EL2_SHIFT)
#define	ID_AA64PFR0_EL2(x)		((x) & ID_AA64PFR0_EL2_MASK)
#define	ID_AA64PFR0_EL2_NONE		(0 << ID_AA64PFR0_EL2_SHIFT)
#define	ID_AA64PFR0_EL2_64		(1 << ID_AA64PFR0_EL2_SHIFT)
#define	ID_AA64PFR0_EL2_64_32		(2 << ID_AA64PFR0_EL2_SHIFT)
#define	ID_AA64PFR0_EL3_SHIFT		12
#define	ID_AA64PFR0_EL3_MASK		(0xf << ID_AA64PFR0_EL3_SHIFT)
#define	ID_AA64PFR0_EL3(x)		((x) & ID_AA64PFR0_EL3_MASK)
#define	ID_AA64PFR0_EL3_NONE		(0 << ID_AA64PFR0_EL3_SHIFT)
#define	ID_AA64PFR0_EL3_64		(1 << ID_AA64PFR0_EL3_SHIFT)
#define	ID_AA64PFR0_EL3_64_32		(2 << ID_AA64PFR0_EL3_SHIFT)
#define	ID_AA64PFR0_FP_SHIFT		16
#define	ID_AA64PFR0_FP_MASK		(0xf << ID_AA64PFR0_FP_SHIFT)
#define	ID_AA64PFR0_FP(x)		((x) & ID_AA64PFR0_FP_MASK)
#define	ID_AA64PFR0_FP_IMPL		(0x0 << ID_AA64PFR0_FP_SHIFT)
#define	ID_AA64PFR0_FP_NONE		(0xf << ID_AA64PFR0_FP_SHIFT)
#define	ID_AA64PFR0_ADV_SIMD_SHIFT	20
#define	ID_AA64PFR0_ADV_SIMD_MASK	(0xf << ID_AA64PFR0_ADV_SIMD_SHIFT)
#define	ID_AA64PFR0_ADV_SIMD(x)		((x) & ID_AA64PFR0_ADV_SIMD_MASK)
#define	ID_AA64PFR0_ADV_SIMD_IMPL	(0x0 << ID_AA64PFR0_ADV_SIMD_SHIFT)
#define	ID_AA64PFR0_ADV_SIMD_NONE	(0xf << ID_AA64PFR0_ADV_SIMD_SHIFT)
#define	ID_AA64PFR0_GIC_BITS		0x4 /* Number of bits in GIC field */
#define	ID_AA64PFR0_GIC_SHIFT		24
#define	ID_AA64PFR0_GIC_MASK		(0xf << ID_AA64PFR0_GIC_SHIFT)
#define	ID_AA64PFR0_GIC(x)		((x) & ID_AA64PFR0_GIC_MASK)
#define	ID_AA64PFR0_GIC_CPUIF_NONE	(0x0 << ID_AA64PFR0_GIC_SHIFT)
#define	ID_AA64PFR0_GIC_CPUIF_EN	(0x1 << ID_AA64PFR0_GIC_SHIFT)

/* MAIR_EL1 - Memory Attribute Indirection Register */
#define	MAIR_ATTR_MASK(idx)	(0xff << ((idx) * 8))
#define	MAIR_ATTR(attr, idx)	((attr) << ((idx) * 8))
#define	MAIR_DEVICE_nGnRnE	0x00
#define	MAIR_NORMAL_NC		0x44
#define	MAIR_NORMAL_WT		0x88
#define	MAIR_NORMAL_WB		0xff
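Sketch of composing a MAIR_EL1 value from the encodings above; the index
assignments are illustrative, not the kernel's actual attribute layout.

#include <sys/types.h>
#include <machine/armreg.h>

static __inline uint64_t
mair_example(void)
{

	/* Cast before shifting so a high index cannot overflow an int. */
	return (MAIR_ATTR((uint64_t)MAIR_DEVICE_nGnRnE, 0) |
	    MAIR_ATTR((uint64_t)MAIR_NORMAL_NC, 1) |
	    MAIR_ATTR((uint64_t)MAIR_NORMAL_WT, 2) |
	    MAIR_ATTR((uint64_t)MAIR_NORMAL_WB, 3));
}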
/* PAR_EL1 - Physical Address Register */
#define	PAR_F_SHIFT	0
#define	PAR_F		(0x1 << PAR_F_SHIFT)
#define	PAR_SUCCESS(x)	(((x) & PAR_F) == 0)
/* When PAR_F == 0 (success) */
#define	PAR_SH_SHIFT	7
#define	PAR_SH_MASK	(0x3 << PAR_SH_SHIFT)
#define	PAR_NS_SHIFT	9
#define	PAR_NS_MASK	(0x3 << PAR_NS_SHIFT)
#define	PAR_PA_SHIFT	12
#define	PAR_PA_MASK	0x0000fffffffff000
#define	PAR_ATTR_SHIFT	56
#define	PAR_ATTR_MASK	(0xffUL << PAR_ATTR_SHIFT)
/* When PAR_F == 1 (aborted) */
#define	PAR_FST_SHIFT	1
#define	PAR_FST_MASK	(0x3f << PAR_FST_SHIFT)
#define	PAR_PTW_SHIFT	8
#define	PAR_PTW_MASK	(0x1 << PAR_PTW_SHIFT)
#define	PAR_S_SHIFT	9
#define	PAR_S_MASK	(0x1 << PAR_S_SHIFT)

/* SCTLR_EL1 - System Control Register */
#define	SCTLR_RES0	0xc8222400	/* Reserved ARMv8.0, write 0 */
#define	SCTLR_RES1	0x30d00800	/* Reserved ARMv8.0, write 1 */
#define	SCTLR_M		0x00000001
#define	SCTLR_A		0x00000002
#define	SCTLR_C		0x00000004
#define	SCTLR_SA	0x00000008
#define	SCTLR_SA0	0x00000010
#define	SCTLR_CP15BEN	0x00000020
#define	SCTLR_THEE	0x00000040
#define	SCTLR_ITD	0x00000080
#define	SCTLR_SED	0x00000100
#define	SCTLR_UMA	0x00000200
#define	SCTLR_I		0x00001000
#define	SCTLR_DZE	0x00004000
#define	SCTLR_UCT	0x00008000
#define	SCTLR_nTWI	0x00010000
#define	SCTLR_nTWE	0x00040000
#define	SCTLR_WXN	0x00080000
#define	SCTLR_IESB	0x00200000
#define	SCTLR_SPAN	0x00800000
#define	SCTLR_EOE	0x01000000
#define	SCTLR_EE	0x02000000
#define	SCTLR_UCI	0x04000000
#define	SCTLR_nTLSMD	0x10000000
#define	SCTLR_LSMAOE	0x20000000

/* SPSR_EL1 */
/*
 * When the exception is taken in AArch64:
 * M[4]   is 0 for AArch64 mode
 * M[3:2] is the exception level
 * M[1]   is unused
 * M[0]   is the SP select:
 *         0: always SP0
 *         1: current EL's SP
 */
#define	PSR_M_EL0t	0x00000000
#define	PSR_M_EL1t	0x00000004
#define	PSR_M_EL1h	0x00000005
#define	PSR_M_EL2t	0x00000008
#define	PSR_M_EL2h	0x00000009
#define	PSR_M_MASK	0x0000001f

#define	PSR_F		0x00000040
#define	PSR_I		0x00000080
#define	PSR_A		0x00000100
#define	PSR_D		0x00000200
#define	PSR_IL		0x00100000
#define	PSR_SS		0x00200000
#define	PSR_V		0x10000000
#define	PSR_C		0x20000000
#define	PSR_Z		0x40000000
#define	PSR_N		0x80000000

/* TCR_EL1 - Translation Control Register */
#define	TCR_ASID_16	(1UL << 36)

#define	TCR_IPS_SHIFT	32
#define	TCR_IPS_32BIT	(0UL << TCR_IPS_SHIFT)
#define	TCR_IPS_36BIT	(1UL << TCR_IPS_SHIFT)
#define	TCR_IPS_40BIT	(2UL << TCR_IPS_SHIFT)
#define	TCR_IPS_42BIT	(3UL << TCR_IPS_SHIFT)
#define	TCR_IPS_44BIT	(4UL << TCR_IPS_SHIFT)
#define	TCR_IPS_48BIT	(5UL << TCR_IPS_SHIFT)

#define	TCR_TG1_SHIFT	30
#define	TCR_TG1_16K	(1 << TCR_TG1_SHIFT)
#define	TCR_TG1_4K	(2 << TCR_TG1_SHIFT)
#define	TCR_TG1_64K	(3 << TCR_TG1_SHIFT)

#define	TCR_SH1_SHIFT	28
#define	TCR_SH1_IS	(0x3UL << TCR_SH1_SHIFT)
#define	TCR_ORGN1_SHIFT	26
#define	TCR_ORGN1_WBWA	(0x1UL << TCR_ORGN1_SHIFT)
#define	TCR_IRGN1_SHIFT	24
#define	TCR_IRGN1_WBWA	(0x1UL << TCR_IRGN1_SHIFT)
#define	TCR_SH0_SHIFT	12
#define	TCR_SH0_IS	(0x3UL << TCR_SH0_SHIFT)
#define	TCR_ORGN0_SHIFT	10
#define	TCR_ORGN0_WBWA	(0x1UL << TCR_ORGN0_SHIFT)
#define	TCR_IRGN0_SHIFT	8
#define	TCR_IRGN0_WBWA	(0x1UL << TCR_IRGN0_SHIFT)

#define	TCR_CACHE_ATTRS	((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |	\
    (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))

#ifdef SMP
#define	TCR_SMP_ATTRS	(TCR_SH0_IS | TCR_SH1_IS)
#else
#define	TCR_SMP_ATTRS	0
#endif

#define	TCR_T1SZ_SHIFT	16
#define	TCR_T0SZ_SHIFT	0
#define	TCR_T1SZ(x)	((x) << TCR_T1SZ_SHIFT)
#define	TCR_T0SZ(x)	((x) << TCR_T0SZ_SHIFT)
#define	TCR_TxSZ(x)	(TCR_T1SZ(x) | TCR_T0SZ(x))

/* Saved Program Status Register */
#define	DBG_SPSR_SS	(0x1 << 21)

/* Monitor Debug System Control Register */
#define	DBG_MDSCR_SS	(0x1 << 0)
#define	DBG_MDSCR_KDE	(0x1 << 13)
#define	DBG_MDSCR_MDE	(0x1 << 15)

/* Performance Monitoring Counters */
#define	PMCR_E		(1 << 0)	/* Enable all counters */
#define	PMCR_P		(1 << 1)	/* Reset all counters */
#define	PMCR_C		(1 << 2)	/* Clock counter reset */
#define	PMCR_D		(1 << 3)	/* CNTR counts every 64 clk cycles */
#define	PMCR_X		(1 << 4)	/* Export to ext. monitoring (ETM) */
#define	PMCR_DP		(1 << 5)	/* Disable CCNT if non-invasive debug */
#define	PMCR_LC		(1 << 6)	/* Long cycle count enable */
#define	PMCR_IMP_SHIFT	24		/* Implementer code */
#define	PMCR_IMP_MASK	(0xff << PMCR_IMP_SHIFT)
#define	PMCR_IDCODE_SHIFT 16		/* Identification code */
#define	PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT)
#define	PMCR_IDCODE_CORTEX_A57	0x01
#define	PMCR_IDCODE_CORTEX_A72	0x02
#define	PMCR_IDCODE_CORTEX_A53	0x03
#define	PMCR_N_SHIFT	11		/* Number of counters implemented */
#define	PMCR_N_MASK	(0x1f << PMCR_N_SHIFT)

#endif /* !_MACHINE_ARMREG_H_ */
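Sketch of decoding PMCR_EL0 with the masks above to find how many event
counters the PMU implements; the function name is illustrative.

#include <sys/types.h>
#include <machine/armreg.h>

static __inline u_int
pmu_counter_count(void)
{
	uint64_t pmcr;

	pmcr = READ_SPECIALREG(pmcr_el0);
	return ((pmcr & PMCR_N_MASK) >> PMCR_N_SHIFT);
}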
Index: head/sys/arm64/include/asm.h
===================================================================
--- head/sys/arm64/include/asm.h	(revision 316755)
+++ head/sys/arm64/include/asm.h	(revision 316756)
@@ -1,74 +1,93 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ASM_H_
#define	_MACHINE_ASM_H_

#undef __FBSDID
#if !defined(lint) && !defined(STRIP_FBSDID)
#define	__FBSDID(s)	.ident s
#else
#define	__FBSDID(s)	/* nothing */
#endif

#define	_C_LABEL(x)	x

#define	ENTRY(sym)						\
	.text; .globl sym; .align 2; .type sym,#function; sym:
#define	EENTRY(sym)						\
	.globl	sym; sym:
#define	END(sym) .size sym, . - sym
#define	EEND(sym)

#define	WEAK_REFERENCE(sym, alias)				\
	.weak alias;						\
	.set alias,sym

#define	UINT64_C(x)	(x)

#if defined(PIC)
#define	PIC_SYM(x,y)	x ## @ ## y
#else
#define	PIC_SYM(x,y)	x
#endif

/* Alias for link register x30 */
#define	lr		x30

/*
 * Sets the trap fault handler. The exception handler will return to the
 * address in the handler register on a data abort or the xzr register to
 * clear the handler. The tmp parameter should be a register able to hold
 * the temporary data.
 */
#define	SET_FAULT_HANDLER(handler, tmp)				\
	ldr	tmp, [x18, #PC_CURTHREAD];	/* Load curthread */ \
	ldr	tmp, [tmp, #TD_PCB];		/* Load the pcb */ \
	str	handler, [tmp, #PCB_ONFAULT]	/* Set the handler */

+#define	ENTER_USER_ACCESS(reg, tmp)			\
+	ldr	tmp, =has_pan;		/* Get the addr of has_pan */ \
+	ldr	reg, [tmp];		/* Read it */		\
+	cbz	reg, 997f;		/* If no PAN skip */	\
+	.inst	0xd500409f | (0 << 8);	/* Clear PAN */		\
+	997:
+
+#define	EXIT_USER_ACCESS(reg)				\
+	cbz	reg, 998f;		/* If no PAN skip */	\
+	.inst	0xd500409f | (1 << 8);	/* Set PAN */		\
+	998:
+
+#define	EXIT_USER_ACCESS_CHECK(reg, tmp)		\
+	ldr	tmp, =has_pan;		/* Get the addr of has_pan */ \
+	ldr	reg, [tmp];		/* Read it */		\
+	cbz	reg, 999f;		/* If no PAN skip */	\
+	.inst	0xd500409f | (1 << 8);	/* Set PAN */		\
+	999:
+
#endif /* _MACHINE_ASM_H_ */
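The raw `.inst` words above encode `msr pan, #imm` (base opcode 0xd500409f
with the immediate in bits 8-11), emitted as data so the file still assembles
with toolchains that predate PAN. An assumed C-level equivalent of the same
toggle, for illustration only:

static __inline void
pan_set(int enable)
{

	if (enable)
		__asm __volatile(".inst 0xd500419f");	/* msr pan, #1 */
	else
		__asm __volatile(".inst 0xd500409f");	/* msr pan, #0 */
}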
Index: head/sys/arm64/include/cpufunc.h
===================================================================
--- head/sys/arm64/include/cpufunc.h	(revision 316755)
+++ head/sys/arm64/include/cpufunc.h	(revision 316756)
@@ -1,151 +1,153 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include

+void pan_enable(void);
+
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}

static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * Ensure compiler barrier, otherwise the monitor clear might
	 * occur too late for us ?
	 */
	__asm __volatile("clrex" : : : "memory");
}

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()
#define	cpu_setttb(a)			arm64_setttb(a)

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))

void arm64_nullop(void);
void arm64_setttb(vm_offset_t);
void arm64_tlb_flushID(void);
void arm64_tlb_flushID_SE(vm_offset_t);
void arm64_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */
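Usage sketch for the DAIF helpers above; the critical-section body is
hypothetical.

static void
critical_example(void)
{
	register_t daif;

	daif = intr_disable();	/* mask IRQs, remember the old DAIF bits */
	/* ... work that must not be interrupted ... */
	intr_restore(daif);	/* restore DAIF exactly as it was saved */
}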