Index: head/sys/arm64/arm64/elf32_machdep.c =================================================================== --- head/sys/arm64/arm64/elf32_machdep.c (revision 343003) +++ head/sys/arm64/arm64/elf32_machdep.c (revision 343004) @@ -1,38 +1,259 @@ /*- - * Copyright (c) 2017 Nuxi, https://nuxi.nl/ + * Copyright (c) 2014, 2015 The FreeBSD Foundation. + * Copyright (c) 2014, 2017 Andrew Turner. + * Copyright (c) 2018 Olivier Houchard + * All rights reserved. * + * This software was developed by Andrew Turner under + * sponsorship from the FreeBSD Foundation. + * + * Portions of this software were developed by Konstantin Belousov + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); -#include -#define __ELF_WORD_SIZE 32 +#define __ELF_WORD_SIZE 32 + +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include -void -elf32_dump_thread(struct thread *td __unused, void *dst __unused, - size_t *off __unused) +#include + +#include + +#define FREEBSD32_MINUSER 0x00001000 +#define FREEBSD32_MAXUSER ((1ul << 32) - PAGE_SIZE) +#define FREEBSD32_SHAREDPAGE (FREEBSD32_MAXUSER - PAGE_SIZE) +#define FREEBSD32_USRSTACK FREEBSD32_SHAREDPAGE + +extern const char *freebsd32_syscallnames[]; + +extern char aarch32_sigcode[]; +extern int sz_aarch32_sigcode; + +static int freebsd32_fetch_syscall_args(struct thread *td); +static void freebsd32_setregs(struct thread *td, struct image_params *imgp, + u_long stack); +static void freebsd32_set_syscall_retval(struct thread *, int); + +static boolean_t elf32_arm_abi_supported(struct image_params *); + +extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask); + +static struct sysentvec elf32_freebsd_sysvec = { + .sv_size = SYS_MAXSYSCALL, + .sv_table = freebsd32_sysent, + .sv_errsize = 0, + .sv_errtbl = NULL, + .sv_transtrap = NULL, + .sv_fixup = elf32_freebsd_fixup, + .sv_sendsig = freebsd32_sendsig, + .sv_sigcode = aarch32_sigcode, + .sv_szsigcode = &sz_aarch32_sigcode, + .sv_name = "FreeBSD ELF32", + .sv_coredump = elf32_coredump, + .sv_imgact_try = NULL, + .sv_minsigstksz = MINSIGSTKSZ, + .sv_pagesize = PAGE_SIZE, + .sv_minuser = FREEBSD32_MINUSER, + .sv_maxuser = FREEBSD32_MAXUSER, + .sv_usrstack = FREEBSD32_USRSTACK, + .sv_psstrings = FREEBSD32_PS_STRINGS, + .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE, + .sv_copyout_strings = freebsd32_copyout_strings, + .sv_setregs = freebsd32_setregs, + .sv_fixlimit = NULL, // XXX + .sv_maxssiz = NULL, + .sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP | SV_TIMEKEEP, + .sv_set_syscall_retval = freebsd32_set_syscall_retval, + .sv_fetch_syscall_args = freebsd32_fetch_syscall_args, + .sv_syscallnames = freebsd32_syscallnames, + .sv_shared_page_base = FREEBSD32_SHAREDPAGE, + .sv_shared_page_len = PAGE_SIZE, + .sv_schedtail = NULL, + .sv_thread_detach = NULL, + .sv_trap = NULL, +}; +INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec); + +static Elf32_Brandinfo freebsd32_brand_info = { + .brand = ELFOSABI_FREEBSD, + .machine = EM_ARM, + .compat_3_brand = "FreeBSD", + .emul_path = NULL, + .interp_path = "/libexec/ld-elf.so.1", + .sysvec = &elf32_freebsd_sysvec, + .interp_newpath = NULL, + .brand_note = &elf32_freebsd_brandnote, + .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, + .header_supported= elf32_arm_abi_supported, +}; + +SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST, + (sysinit_cfunc_t)elf32_insert_brand_entry, &freebsd32_brand_info); + +static boolean_t +elf32_arm_abi_supported(struct image_params *imgp) { + const Elf32_Ehdr *hdr; + /* Check if we support AArch32 */ + if (ID_AA64PFR0_EL0(READ_SPECIALREG(id_aa64pfr0_el1)) != + ID_AA64PFR0_EL0_64_32) + return (FALSE); + +#define EF_ARM_EABI_VERSION(x) (((x) & EF_ARM_EABIMASK) >> 24) +#define EF_ARM_EABI_FREEBSD_MIN 4 + hdr = (const Elf32_Ehdr *)imgp->image_header; + if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) { + if (bootverbose) + uprintf("Attempting to execute non EABI binary " + "(rev %d) image %s", + EF_ARM_EABI_VERSION(hdr->e_flags), + imgp->args->fname); + return (FALSE); + } + + return (TRUE); +} + +static int +freebsd32_fetch_syscall_args(struct thread *td) +{ + struct proc *p; + register_t *ap; + struct syscall_args *sa; + 
int error, i, nap; + unsigned int args[4]; + + nap = 4; + p = td->td_proc; + ap = td->td_frame->tf_x; + sa = &td->td_sa; + + /* r7 is the syscall id */ + sa->code = td->td_frame->tf_x[7]; + + if (sa->code == SYS_syscall) { + sa->code = *ap++; + nap--; + } else if (sa->code == SYS___syscall) { + sa->code = ap[1]; + nap -= 2; + ap += 2; + } + + if (sa->code >= p->p_sysent->sv_size) + sa->callp = &p->p_sysent->sv_table[0]; + else + sa->callp = &p->p_sysent->sv_table[sa->code]; + + sa->narg = sa->callp->sy_narg; + for (i = 0; i < nap; i++) + sa->args[i] = ap[i]; + if (sa->narg > nap) { + if ((sa->narg - nap) > nitems(args)) + panic("Too many system call arguments"); + error = copyin((void *)td->td_frame->tf_x[13], args, + (sa->narg - nap) * sizeof(int)); + if (error != 0) + return (error); + for (i = 0; i < (sa->narg - nap); i++) + sa->args[i + nap] = args[i]; + } + + td->td_retval[0] = 0; + td->td_retval[1] = 0; + + return (0); +} + +static void +freebsd32_set_syscall_retval(struct thread *td, int error) +{ + struct trapframe *frame; + + frame = td->td_frame; + switch (error) { + case 0: + frame->tf_x[0] = td->td_retval[0]; + frame->tf_x[1] = td->td_retval[1]; + frame->tf_spsr &= ~PSR_C; + break; + case ERESTART: + /* + * Reconstruct the pc to point at the swi. + */ + if ((frame->tf_spsr & PSR_T) != 0) + frame->tf_elr -= 2; /* THUMB_INSN_SIZE */ + else + frame->tf_elr -= 4; /* INSN_SIZE */ + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + frame->tf_x[0] = error; + frame->tf_spsr |= PSR_C; + break; + } +} + +static void +freebsd32_setregs(struct thread *td, struct image_params *imgp, + u_long stack) +{ + struct trapframe *tf = td->td_frame; + + memset(tf, 0, sizeof(struct trapframe)); + + /* + * We need to set x0 for init as it doesn't call + * cpu_set_syscall_retval to copy the value. We also + * need to set td_retval for the cases where we do. + */ + tf->tf_x[0] = stack; + /* SP_usr is mapped to x13 */ + tf->tf_x[13] = stack; + /* LR_usr is mapped to x14 */ + tf->tf_x[14] = imgp->entry_addr; + tf->tf_elr = imgp->entry_addr; + tf->tf_spsr = PSR_M_32; +} + +void +elf32_dump_thread(struct thread *td, void *dst, size_t *off) +{ + /* XXX: VFP */ } Index: head/sys/arm64/arm64/freebsd32_machdep.c =================================================================== --- head/sys/arm64/arm64/freebsd32_machdep.c (revision 343003) +++ head/sys/arm64/arm64/freebsd32_machdep.c (revision 343004) @@ -1,70 +1,409 @@ /*- + * Copyright (c) 2018 Olivier Houchard * Copyright (c) 2017 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include - +#include +#include +#include +#include +#include +#include +#include +#ifdef VFP +#include +#endif #include +#include +extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask); + /* + * The first two fields of a ucontext_t are the signal mask and the machine + * context. The next field is uc_link; we want to avoid destroying the link + * when copying out contexts. + */ +#define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) + +#ifdef VFP +static void get_fpcontext32(struct thread *td, mcontext32_vfp_t *); +#endif + +/* * Stubs for machine dependent 32-bits system calls. */ int +freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap) +{ + int error; + +#define ARM_SYNC_ICACHE 0 +#define ARM_DRAIN_WRITEBUF 1 +#define ARM_SET_TP 2 +#define ARM_GET_TP 3 +#define ARM_GET_VFPSTATE 4 + + switch(uap->op) { + case ARM_SET_TP: + WRITE_SPECIALREG(TPIDR_EL0, uap->parms); + WRITE_SPECIALREG(TPIDRRO_EL0, uap->parms); + return 0; + case ARM_SYNC_ICACHE: + { + struct { + uint32_t addr; + uint32_t size; + } args; + + if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) + return (error); + if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff) + return (EINVAL); + cpu_icache_sync_range(args.addr, args.size); + return 0; + } + case ARM_GET_VFPSTATE: + { + mcontext32_vfp_t mcontext_vfp; + + struct { + uint32_t mc_vfp_size; + uint32_t mc_vfp; + } args; + if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) + return (error); + if (args.mc_vfp_size != sizeof(mcontext_vfp)) + return (EINVAL); +#ifdef VFP + get_fpcontext32(td, &mcontext_vfp); +#else + bzero(&mcontext_vfp, sizeof(mcontext_vfp)); +#endif + error = copyout(&mcontext_vfp, + (void *)(uintptr_t)args.mc_vfp, + sizeof(mcontext_vfp)); + return error; + } + } + + return (EINVAL); +} + + + +#ifdef VFP +static void +get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) +{ + struct pcb *curpcb; + + critical_enter(); + curpcb = curthread->td_pcb; + + if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { + /* + * If we have just been running VFP instructions we will + * need to save the state to memcpy it below. 
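For context, a 32-bit binary reaches the freebsd32_sysarch() handler above through the sysarch(2) system call. A minimal userland sketch of the ARM_SYNC_ICACHE path, assuming the arm compat definitions in <machine/sysarch.h> (struct arm_sync_icache_args and the sysarch() prototype, which exist on FreeBSD/arm), could look like:

#include <stddef.h>
#include <stdint.h>
#include <machine/sysarch.h>

/* Flush the instruction cache after writing code at runtime; the kernel
 * handler above copies in this addr/size pair and rejects ranges that
 * would overflow 32 bits. */
static int
sync_icache(void *addr, size_t len)
{
	struct arm_sync_icache_args args;

	args.addr = (uintptr_t)addr;
	args.len = len;
	return (sysarch(ARM_SYNC_ICACHE, &args));
}
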
+ */ + vfp_save_state(td, curpcb); + + KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, + ("Called get_fpcontext while the kernel is using the VFP")); + KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, + ("Non-userspace FPU flags set in get_fpcontext")); + memcpy(mcp->mcv_reg, curpcb->pcb_fpustate.vfp_regs, + sizeof(mcp->mcv_reg)); + mcp->mcv_fpscr = VFP_FPSCR_FROM_SRCR(curpcb->pcb_fpustate.vfp_fpcr, + curpcb->pcb_fpustate.vfp_fpsr); + } + critical_exit(); +} + +static void +set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) +{ + struct pcb *pcb; + + critical_enter(); + pcb = td->td_pcb; + if (td == curthread) + vfp_discard(td); + memcpy(pcb->pcb_fpustate.vfp_regs, mcp->mcv_reg, + sizeof(pcb->pcb_fpustate.vfp_regs)); + pcb->pcb_fpustate.vfp_fpsr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr); + pcb->pcb_fpustate.vfp_fpcr = VFP_FPCR_FROM_FPSCR(mcp->mcv_fpscr); + critical_exit(); +} +#endif +static void +get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) +{ + struct pcb *pcb; + struct trapframe *tf; + int i; + + pcb = td->td_pcb; + tf = td->td_frame; + + if ((flags & GET_MC_CLEAR_RET) != 0) { + mcp->mc_gregset[0] = 0; + mcp->mc_gregset[16] = tf->tf_spsr & ~PSR_C; + } else { + mcp->mc_gregset[0] = tf->tf_x[0]; + mcp->mc_gregset[16] = tf->tf_spsr; + } + for (i = 1; i < 15; i++) + mcp->mc_gregset[i] = tf->tf_x[i]; + mcp->mc_gregset[15] = tf->tf_elr; + + mcp->mc_vfp_size = 0; + mcp->mc_vfp_ptr = 0; + + memset(mcp->mc_spare, 0, sizeof(mcp->mc_spare)); +} + +static int +set_mcontext32(struct thread *td, mcontext32_t *mcp) +{ + struct trapframe *tf; + mcontext32_vfp_t mc_vfp; + int i; + + tf = td->td_frame; + + for (i = 0; i < 15; i++) + tf->tf_x[i] = mcp->mc_gregset[i]; + tf->tf_elr = mcp->mc_gregset[15]; + tf->tf_spsr = mcp->mc_gregset[16]; +#ifdef VFP + if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != 0) { + if (copyin((void *)(uintptr_t)mcp->mc_vfp_ptr, &mc_vfp, + sizeof(mc_vfp)) != 0) + return (EFAULT); + set_fpcontext32(td, &mc_vfp); + } +#endif + + return (0); +} + +#define UC_COPY_SIZE offsetof(ucontext32_t, uc_link) + +int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { + ucontext32_t uc; + int ret; - return (ENOSYS); + if (uap->ucp == NULL) + ret = EINVAL; + else { + memset(&uc, 0, sizeof(uc)); + get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); + PROC_LOCK(td->td_proc); + uc.uc_sigmask = td->td_sigmask; + PROC_UNLOCK(td->td_proc); + ret = copyout(&uc, uap->ucp, UC_COPY_SIZE); + } + return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { + ucontext32_t uc; + int ret; - return (ENOSYS); + if (uap->ucp == NULL) + ret = EINVAL; + else { + ret = copyin(uap->ucp, &uc, UC_COPY_SIZE); + if (ret == 0) { + ret = set_mcontext32(td, &uc.uc_mcontext); + if (ret == 0) + kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, + NULL, 0); + } + } + return (ret); } int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { + ucontext32_t uc; + int error; - return (ENOSYS); + if (uap == NULL) + return (EFAULT); + if (copyin(uap->sigcntxp, &uc, sizeof(uc))) + return (EFAULT); + error = set_mcontext32(td, &uc.uc_mcontext); + if (error != 0) + return (error); + + /* Restore signal mask.
*/ + kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); + + return (EJUSTRETURN); + } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { + ucontext32_t uc; + int ret; - return (ENOSYS); + if (uap->oucp == NULL || uap->ucp == NULL) + ret = EINVAL; + else { + get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); + PROC_LOCK(td->td_proc); + uc.uc_sigmask = td->td_sigmask; + PROC_UNLOCK(td->td_proc); + ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); + if (ret == 0) { + ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); + if (ret == 0) { + ret = set_mcontext32(td, &uc.uc_mcontext); + kern_sigprocmask(td, SIG_SETMASK, + &uc.uc_sigmask, NULL, 0); + } + } + } + return (ret); } -int -freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap) +void +freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { + struct thread *td; + struct proc *p; + struct trapframe *tf; + struct sigframe32 *fp, frame; + struct sigacts *psp; + struct siginfo32 siginfo; + struct sysentvec *sysent; + int onstack; + int sig; + int code; - return (ENOSYS); + siginfo_to_siginfo32(&ksi->ksi_info, &siginfo); + td = curthread; + p = td->td_proc; + PROC_LOCK_ASSERT(p, MA_OWNED); + sig = ksi->ksi_signo; + code = ksi->ksi_code; + psp = p->p_sigacts; + mtx_assert(&psp->ps_mtx, MA_OWNED); + tf = td->td_frame; + onstack = sigonstack(tf->tf_x[13]); + + CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, + catcher, sig); + + /* Allocate and validate space for the signal handler context. */ + if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && + SIGISMEMBER(psp->ps_sigonstack, sig)) { + fp = (struct sigframe32 *)((uintptr_t)td->td_sigstk.ss_sp + + td->td_sigstk.ss_size); +#if defined(COMPAT_43) + td->td_sigstk.ss_flags |= SS_ONSTACK; +#endif + } else + fp = (struct sigframe32 *)td->td_frame->tf_x[13]; + + /* make room on the stack */ + fp--; + + /* make the stack aligned */ + fp = (struct sigframe32 *)((unsigned long)(fp) &~ (8 - 1)); + /* Populate the siginfo frame. */ + get_mcontext32(td, &frame.sf_uc.uc_mcontext, 0); +#ifdef VFP + get_fpcontext32(td, &frame.sf_vfp); + frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); + frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)(uintptr_t)&fp->sf_vfp; +#else + frame.sf_uc.uc_mcontext.mc_vfp_size = 0; + frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)NULL; +#endif + frame.sf_si = siginfo; + frame.sf_uc.uc_sigmask = *mask; + frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) + ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; + frame.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; + frame.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size; + + mtx_unlock(&psp->ps_mtx); + PROC_UNLOCK(td->td_proc); + + /* Copy the sigframe out to the user's stack. */ + if (copyout(&frame, fp, sizeof(*fp)) != 0) { + /* Process has trashed its stack. Kill it. */ + CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); + PROC_LOCK(p); + sigexit(td, SIGILL); + } + + /* + * Build context to run handler in. We invoke the handler + * directly, only returning via the trampoline. Note the + * trampoline version numbers are coordinated with machine- + * dependent code in libc. 
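The register setup that follows encodes the AArch32 interworking rule that bit 0 of a code address selects the instruction set; a one-line C restatement of the test applied to the handler address (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Bit 0 of an AArch32 code pointer distinguishes a Thumb entry point (1)
 * from an ARM one (0), so SPSR.T must mirror it before the signal
 * handler is entered. */
static inline bool
catcher_is_thumb(uint32_t catcher)
{
	return ((catcher & 1) != 0);
}
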
+ */ + + tf->tf_x[0] = sig; + tf->tf_x[1] = (register_t)&fp->sf_si; + tf->tf_x[2] = (register_t)&fp->sf_uc; + + /* the trampoline uses r5 as the uc address */ + tf->tf_x[5] = (register_t)&fp->sf_uc; + tf->tf_elr = (register_t)catcher; + tf->tf_x[13] = (register_t)fp; + sysent = p->p_sysent; + if (sysent->sv_sigcode_base != 0) + tf->tf_x[14] = (register_t)sysent->sv_sigcode_base; + else + tf->tf_x[14] = (register_t)(sysent->sv_psstrings - + *(sysent->sv_szsigcode)); + /* Set the mode to enter in the signal handler */ + if ((register_t)catcher & 1) + tf->tf_spsr |= PSR_T; + else + tf->tf_spsr &= ~PSR_T; + + CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_x[14], + tf->tf_x[13]); + + PROC_LOCK(p); + mtx_lock(&psp->ps_mtx); + } Index: head/sys/arm64/arm64/identcpu.c =================================================================== --- head/sys/arm64/arm64/identcpu.c (revision 343003) +++ head/sys/arm64/arm64/identcpu.c (revision 343004) @@ -1,1373 +1,1393 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include static int ident_lock; char machine[] = "arm64"; -SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, - "Machine class"); +#ifdef SCTL_MASK32 +extern int adaptive_machine_arch; +#endif + +static int +sysctl_hw_machine(SYSCTL_HANDLER_ARGS) +{ +#ifdef SCTL_MASK32 + static const char machine32[] = "arm"; +#endif + int error; +#ifdef SCTL_MASK32 + if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) + error = SYSCTL_OUT(req, machine32, sizeof(machine32)); + else +#endif + error = SYSCTL_OUT(req, machine, sizeof(machine)); + return (error); +} + +SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | + CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. 
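As a concrete reading of the CPU_AFFn macros mentioned above, the affinity fields sit at fixed positions in MPIDR_EL1 (per the ARMv8 register layout); a sketch of the extraction:

#include <stdint.h>

/* Aff0, Aff1 and Aff2 occupy MPIDR_EL1 bits 7:0, 15:8 and 23:16; Aff3
 * sits in bits 39:32. */
static inline uint32_t mpidr_aff0(uint64_t mpidr) { return ((uint32_t)(mpidr & 0xff)); }
static inline uint32_t mpidr_aff1(uint64_t mpidr) { return ((uint32_t)((mpidr >> 8) & 0xff)); }
static inline uint32_t mpidr_aff2(uint64_t mpidr) { return ((uint32_t)((mpidr >> 16) & 0xff)); }
static inline uint32_t mpidr_aff3(uint64_t mpidr) { return ((uint32_t)((mpidr >> 32) & 0xff)); }
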
* * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { u_int cpu_impl; u_int cpu_part_num; u_int cpu_variant; u_int cpu_revision; const char *cpu_impl_name; const char *cpu_part_name; uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; }; struct cpu_desc cpu_desc[MAXCPU]; struct cpu_desc user_cpu_desc; static u_int cpu_print_regs; #define PRINT_ID_AA64_AFR0 0x00000001 #define PRINT_ID_AA64_AFR1 0x00000002 #define PRINT_ID_AA64_DFR0 0x00000010 #define PRINT_ID_AA64_DFR1 0x00000020 #define PRINT_ID_AA64_ISAR0 0x00000100 #define PRINT_ID_AA64_ISAR1 0x00000200 #define PRINT_ID_AA64_MMFR0 0x00001000 #define PRINT_ID_AA64_MMFR1 0x00002000 #define PRINT_ID_AA64_MMFR2 0x00004000 #define PRINT_ID_AA64_PFR0 0x00010000 #define PRINT_ID_AA64_PFR1 0x00020000 struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, "Unknown Processor" } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { { CPU_PART_THUNDERX, "ThunderX" }, { CPU_PART_THUNDERX2, "ThunderX2" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_none }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, CPU_IMPLEMENTER_NONE, }; #define MRS_TYPE_MASK 0xf #define MRS_INVALID 0 #define MRS_EXACT 1 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4)) #define MRS_EXACT_FIELD(x) ((x) >> 4) #define MRS_LOWER 2 struct mrs_field { bool sign; u_int type; u_int shift; }; #define MRS_FIELD(_sign, _type, _shift) \ { \ .sign = (_sign), \ .type = (_type), \ .shift = (_shift), \ } #define MRS_FIELD_END { .type = MRS_INVALID, } static struct mrs_field id_aa64isar0_fields[] = { MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_ATOMIC_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT), MRS_FIELD_END, }; static struct mrs_field id_aa64isar1_fields[] = { MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT), MRS_FIELD_END, }; static struct mrs_field id_aa64pfr0_fields[] = { MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT), MRS_FIELD(true, MRS_LOWER, ID_AA64PFR0_ADV_SIMD_SHIFT), MRS_FIELD(true, MRS_LOWER, ID_AA64PFR0_FP_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT), MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT), MRS_FIELD_END, }; static struct mrs_field id_aa64dfr0_fields[] = { MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMS_VER_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPS_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPS_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPS_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMU_VER_SHIFT), MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TRACE_VER_SHIFT), MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DEBUG_VER_SHIFT), MRS_FIELD_END, }; struct mrs_user_reg { u_int CRm; u_int Op2; size_t offset; struct mrs_field *fields; }; static struct mrs_user_reg user_regs[] = { { /* id_aa64isar0_el1 */ .CRm = 6, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64isar0), .fields = id_aa64isar0_fields, }, { /* id_aa64isar1_el1 */ .CRm = 6, .Op2 = 1, .offset = __offsetof(struct cpu_desc, id_aa64isar1), .fields = id_aa64isar1_fields, }, { /* id_aa64pfr0_el1 */ .CRm = 4, .Op2 = 0, .offset = __offsetof(struct 
cpu_desc, id_aa64pfr0), .fields = id_aa64pfr0_fields, }, { /* id_aa64dfr0_el1 */ .CRm = 5, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64dfr0), .fields = id_aa64dfr0_fields, }, }; #define CPU_DESC_FIELD(desc, idx) \ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset) static int user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame, uint32_t esr) { uint64_t value; int CRm, Op2, i, reg; if ((insn & MRS_MASK) != MRS_VALUE) return (0); /* * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}. * These are in the EL1 CPU identification space. * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1. * CRm == {4-7} holds the ID_AA64 registers. * * For full details see the ARMv8 ARM (ARM DDI 0487C.a) * Table D9-2 System instruction encodings for non-Debug System * register accesses. */ if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0) return (0); CRm = mrs_CRm(insn); if (CRm > 7 || (CRm < 4 && CRm != 0)) return (0); Op2 = mrs_Op2(insn); value = 0; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) { value = CPU_DESC_FIELD(user_cpu_desc, i); break; } } if (CRm == 0) { switch (Op2) { case 0: value = READ_SPECIALREG(midr_el1); break; case 5: value = READ_SPECIALREG(mpidr_el1); break; case 6: value = READ_SPECIALREG(revidr_el1); break; default: return (0); } } /* * We will handle this instruction, move to the next so we * don't trap here again. */ frame->tf_elr += INSN_SIZE; reg = MRS_REGISTER(insn); /* If reg is 31 then write to xzr, i.e. do nothing */ if (reg == 31) return (1); if (reg < nitems(frame->tf_x)) frame->tf_x[reg] = value; else if (reg == 30) frame->tf_lr = value; return (1); } static void update_user_regs(u_int cpu) { struct mrs_field *fields; uint64_t cur, value; int i, j, cur_field, new_field; for (i = 0; i < nitems(user_regs); i++) { value = CPU_DESC_FIELD(cpu_desc[cpu], i); if (cpu == 0) cur = value; else cur = CPU_DESC_FIELD(user_cpu_desc, i); fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { switch (fields[j].type & MRS_TYPE_MASK) { case MRS_EXACT: cur &= ~(0xfu << fields[j].shift); cur |= (uint64_t)MRS_EXACT_FIELD(fields[j].type) << fields[j].shift; break; case MRS_LOWER: new_field = (value >> fields[j].shift) & 0xf; cur_field = (cur >> fields[j].shift) & 0xf; if ((fields[j].sign && (int)new_field < (int)cur_field) || (!fields[j].sign && (u_int)new_field < (u_int)cur_field)) { cur &= ~(0xfu << fields[j].shift); cur |= new_field << fields[j].shift; } break; default: panic("Invalid field type: %d", fields[j].type); } } CPU_DESC_FIELD(user_cpu_desc, i) = cur; } } static void identify_cpu_sysinit(void *dummy __unused) { int cpu; /* Create a user visible cpu description with safe values */ memset(&user_cpu_desc, 0, sizeof(user_cpu_desc)); /* Safe values for these registers */ user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_ADV_SIMD_NONE | ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64; user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DEBUG_VER_8; CPU_FOREACH(cpu) { print_cpu_features(cpu); update_user_regs(cpu); } install_undef_handler(true, user_mrs_handler); } SYSINIT(identity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL); void print_cpu_features(u_int cpu) { struct sbuf *sb; int printed; sb = sbuf_new_auto(); sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); sbuf_cat(sb, " affinity:"); switch(cpu_aff_levels) { default: case 4: sbuf_printf(sb, "
%2d", CPU_AFF3(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 3: sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 2: sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr)); break; } sbuf_finish(sb); printf("%s\n", sbuf_data(sb)); sbuf_clear(sb); /* * There is a hardware errata where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive the * store-exclusive may return the wrong status. A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. */ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); if (cpu != 0 && cpu_print_regs == 0) return; #define SEP_STR ((printed++) == 0) ? "" : "," /* AArch64 Instruction Set Attribute Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) { printed = 0; sbuf_printf(sb, " Instruction Set Attributes 0 = <"); switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_RDM_NONE: break; case ID_AA64ISAR0_RDM_IMPL: sbuf_printf(sb, "%sRDM", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown RDM", SEP_STR); } switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_ATOMIC_NONE: break; case ID_AA64ISAR0_ATOMIC_IMPL: sbuf_printf(sb, "%sAtomic", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Atomic", SEP_STR); } switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_AES_NONE: break; case ID_AA64ISAR0_AES_BASE: sbuf_printf(sb, "%sAES", SEP_STR); break; case ID_AA64ISAR0_AES_PMULL: sbuf_printf(sb, "%sAES+PMULL", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown AES", SEP_STR); break; } switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA1_NONE: break; case ID_AA64ISAR0_SHA1_BASE: sbuf_printf(sb, "%sSHA1", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SHA1", SEP_STR); break; } switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA2_NONE: break; case ID_AA64ISAR0_SHA2_BASE: sbuf_printf(sb, "%sSHA2", SEP_STR); break; case ID_AA64ISAR0_SHA2_512: sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SHA2", SEP_STR); break; } switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_CRC32_NONE: break; case ID_AA64ISAR0_CRC32_BASE: sbuf_printf(sb, "%sCRC32", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown CRC32", SEP_STR); break; } switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA3_NONE: break; case ID_AA64ISAR0_SHA3_IMPL: sbuf_printf(sb, "%sSHA3", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SHA3", SEP_STR); break; } switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SM3_NONE: break; case ID_AA64ISAR0_SM3_IMPL: sbuf_printf(sb, "%sSM3", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SM3", SEP_STR); break; } switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) { case 
ID_AA64ISAR0_SM4_NONE: break; case ID_AA64ISAR0_SM4_IMPL: sbuf_printf(sb, "%sSM4", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SM4", SEP_STR); break; } switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_DP_NONE: break; case ID_AA64ISAR0_DP_IMPL: sbuf_printf(sb, "%sDotProd", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown DP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Instruction Set Attribute Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) { printed = 0; sbuf_printf(sb, " Instruction Set Attributes 1 = <"); switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_GPI_NONE: break; case ID_AA64ISAR1_GPI_IMPL: sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR); break; } switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_GPA_NONE: break; case ID_AA64ISAR1_GPA_IMPL: sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR); break; } switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_LRCPC_NONE: break; case ID_AA64ISAR1_LRCPC_IMPL: sbuf_printf(sb, "%sRCpc", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown RCpc", SEP_STR); break; } switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_FCMA_NONE: break; case ID_AA64ISAR1_FCMA_IMPL: sbuf_printf(sb, "%sFCMA", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown FCMA", SEP_STR); break; } switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_JSCVT_NONE: break; case ID_AA64ISAR1_JSCVT_IMPL: sbuf_printf(sb, "%sJS Conv", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR); break; } switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_API_NONE: break; case ID_AA64ISAR1_API_IMPL: sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR); break; } switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_APA_NONE: break; case ID_AA64ISAR1_APA_IMPL: sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR); break; } switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_DPB_NONE: break; case ID_AA64ISAR1_DPB_IMPL: sbuf_printf(sb, "%sDC CVAP", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Processor Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) { printed = 0; sbuf_printf(sb, " Processor Features 0 = <"); switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_SVE_NONE: break; case ID_AA64PFR0_SVE_IMPL: sbuf_printf(sb, "%sSVE", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SVE", SEP_STR); break; } switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_RAS_NONE: break; case ID_AA64PFR0_RAS_V1: sbuf_printf(sb, "%sRASv1", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown RAS", SEP_STR); break; } switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) { case 
ID_AA64PFR0_GIC_CPUIF_NONE: break; case ID_AA64PFR0_GIC_CPUIF_EN: sbuf_printf(sb, "%sGIC", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR); break; } switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_ADV_SIMD_NONE: break; case ID_AA64PFR0_ADV_SIMD_IMPL: sbuf_printf(sb, "%sAdvSIMD", SEP_STR); break; case ID_AA64PFR0_ADV_SIMD_HP: sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR); break; } switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_FP_NONE: break; case ID_AA64PFR0_FP_IMPL: sbuf_printf(sb, "%sFloat", SEP_STR); break; case ID_AA64PFR0_FP_HP: sbuf_printf(sb, "%sFloat+HP", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Float", SEP_STR); break; } switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL3_NONE: sbuf_printf(sb, "%sNo EL3", SEP_STR); break; case ID_AA64PFR0_EL3_64: sbuf_printf(sb, "%sEL3", SEP_STR); break; case ID_AA64PFR0_EL3_64_32: sbuf_printf(sb, "%sEL3 32", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown EL3", SEP_STR); break; } switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL2_NONE: sbuf_printf(sb, "%sNo EL2", SEP_STR); break; case ID_AA64PFR0_EL2_64: sbuf_printf(sb, "%sEL2", SEP_STR); break; case ID_AA64PFR0_EL2_64_32: sbuf_printf(sb, "%sEL2 32", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown EL2", SEP_STR); break; } switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL1_64: sbuf_printf(sb, "%sEL1", SEP_STR); break; case ID_AA64PFR0_EL1_64_32: sbuf_printf(sb, "%sEL1 32", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown EL1", SEP_STR); break; } switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL0_64: sbuf_printf(sb, "%sEL0", SEP_STR); break; case ID_AA64PFR0_EL0_64_32: sbuf_printf(sb, "%sEL0 32", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown EL0", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Processor Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) { printf(" Processor Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64pfr1); } /* AArch64 Memory Model Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) { printed = 0; sbuf_printf(sb, " Memory Model Features 0 = <"); switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN4_NONE: break; case ID_AA64MMFR0_TGRAN4_IMPL: sbuf_printf(sb, "%s4k Granule", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN16_NONE: break; case ID_AA64MMFR0_TGRAN16_IMPL: sbuf_printf(sb, "%s16k Granule", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN64_NONE: break; case ID_AA64MMFR0_TGRAN64_IMPL: sbuf_printf(sb, "%s64k Granule", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_BIGEND_FIXED: break; case ID_AA64MMFR0_BIGEND_MIXED: sbuf_printf(sb, "%sMixedEndian", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR); break; } switch 
(ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_BIGEND_EL0_FIXED: break; case ID_AA64MMFR0_BIGEND_EL0_MIXED: sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR); break; } switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_S_NS_MEM_NONE: break; case ID_AA64MMFR0_S_NS_MEM_DISTINCT: sbuf_printf(sb, "%sS/NS Mem", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR); break; } switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_ASID_BITS_8: sbuf_printf(sb, "%s8bit ASID", SEP_STR); break; case ID_AA64MMFR0_ASID_BITS_16: sbuf_printf(sb, "%s16bit ASID", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown ASID", SEP_STR); break; } switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_PA_RANGE_4G: sbuf_printf(sb, "%s4GB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_64G: sbuf_printf(sb, "%s64GB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_1T: sbuf_printf(sb, "%s1TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_4T: sbuf_printf(sb, "%s4TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_16T: sbuf_printf(sb, "%s16TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_256T: sbuf_printf(sb, "%s256TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_4P: sbuf_printf(sb, "%s4PB PA", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown PA Range", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) { printed = 0; sbuf_printf(sb, " Memory Model Features 1 = <"); switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_XNX_NONE: break; case ID_AA64MMFR1_XNX_IMPL: sbuf_printf(sb, "%sEL2 XN", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown XNX", SEP_STR); break; } switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_SPEC_SEI_NONE: break; case ID_AA64MMFR1_SPEC_SEI_IMPL: sbuf_printf(sb, "%sSpecSEI", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR); break; } switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_PAN_NONE: break; case ID_AA64MMFR1_PAN_IMPL: sbuf_printf(sb, "%sPAN", SEP_STR); break; case ID_AA64MMFR1_PAN_ATS1E1: sbuf_printf(sb, "%sPAN+AT", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown PAN", SEP_STR); break; } switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_LO_NONE: break; case ID_AA64MMFR1_LO_IMPL: sbuf_printf(sb, "%sLO", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown LO", SEP_STR); break; } switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_HPDS_NONE: break; case ID_AA64MMFR1_HPDS_HPD: sbuf_printf(sb, "%sHPDS", SEP_STR); break; case ID_AA64MMFR1_HPDS_TTPBHA: sbuf_printf(sb, "%sTTPBHA", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown HPDS", SEP_STR); break; } switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_VH_NONE: break; case ID_AA64MMFR1_VH_IMPL: sbuf_printf(sb, "%sVHE", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown VHE", SEP_STR); break; } switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_VMIDBITS_8: break; case ID_AA64MMFR1_VMIDBITS_16: sbuf_printf(sb, "%s16 VMID bits", SEP_STR); break; default: 
sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR); break; } switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_HAFDBS_NONE: break; case ID_AA64MMFR1_HAFDBS_AF: sbuf_printf(sb, "%sAF", SEP_STR); break; case ID_AA64MMFR1_HAFDBS_AF_DBS: sbuf_printf(sb, "%sAF+DBS", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Memory Model Feature Register 2 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) { printed = 0; sbuf_printf(sb, " Memory Model Features 2 = <"); switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_NV_NONE: break; case ID_AA64MMFR2_NV_IMPL: sbuf_printf(sb, "%sNestedVirt", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR); break; } switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_CCIDX_32: sbuf_printf(sb, "%s32b CCIDX", SEP_STR); break; case ID_AA64MMFR2_CCIDX_64: sbuf_printf(sb, "%s64b CCIDX", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR); break; } switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_VA_RANGE_48: sbuf_printf(sb, "%s48b VA", SEP_STR); break; case ID_AA64MMFR2_VA_RANGE_52: sbuf_printf(sb, "%s52b VA", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown VA Range", SEP_STR); break; } switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_IESB_NONE: break; case ID_AA64MMFR2_IESB_IMPL: sbuf_printf(sb, "%sIESB", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown IESB", SEP_STR); break; } switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_LSM_NONE: break; case ID_AA64MMFR2_LSM_IMPL: sbuf_printf(sb, "%sLSM", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown LSM", SEP_STR); break; } switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_UAO_NONE: break; case ID_AA64MMFR2_UAO_IMPL: sbuf_printf(sb, "%sUAO", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown UAO", SEP_STR); break; } switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_CNP_NONE: break; case ID_AA64MMFR2_CNP_IMPL: sbuf_printf(sb, "%sCnP", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown CnP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Debug Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) { printed = 0; sbuf_printf(sb, " Debug Features 0 = <"); switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_PMS_VER_NONE: break; case ID_AA64DFR0_PMS_VER_V1: sbuf_printf(sb, "%sSPE v1", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown SPE", SEP_STR); break; } sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR, ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0)); sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR, ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0)); sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR, ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0)); switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_PMU_VER_NONE: break; case ID_AA64DFR0_PMU_VER_3: sbuf_printf(sb, "%sPMUv3", SEP_STR); break; case ID_AA64DFR0_PMU_VER_3_1: sbuf_printf(sb, "%sPMUv3+16 
bit evtCount", SEP_STR); break; case ID_AA64DFR0_PMU_VER_IMPL: sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown PMU", SEP_STR); break; } switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_TRACE_VER_NONE: break; case ID_AA64DFR0_TRACE_VER_IMPL: sbuf_printf(sb, "%sTrace", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Trace", SEP_STR); break; } switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_DEBUG_VER_8: sbuf_printf(sb, "%sDebug v8", SEP_STR); break; case ID_AA64DFR0_DEBUG_VER_8_VHE: sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR); break; case ID_AA64DFR0_DEBUG_VER_8_2: sbuf_printf(sb, "%sDebug v8.2", SEP_STR); break; default: sbuf_printf(sb, "%sUnknown Debug", SEP_STR); break; } if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK) sbuf_printf(sb, "%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) { printf(" Debug Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64dfr1); } /* AArch64 Auxiliary Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) { printf(" Auxiliary Features 0 = <%#lx>\n", cpu_desc[cpu].id_aa64afr0); } /* AArch64 Auxiliary Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) { printf(" Auxiliary Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64afr1); } sbuf_delete(sb); sb = NULL; #undef SEP_STR } void identify_cpu(void) { u_int midr; u_int impl_id; u_int part_id; u_int cpu; size_t i; const struct cpu_parts *cpu_partsp = NULL; cpu = PCPU_GET(cpuid); midr = get_midr(); /* * Store midr to pcpu to allow fast reading * from EL0, EL1 and assembly code. */ PCPU_SET(midr, midr); impl_id = CPU_IMPL(midr); for (i = 0; i < nitems(cpu_implementers); i++) { if (impl_id == cpu_implementers[i].impl_id || cpu_implementers[i].impl_id == 0) { cpu_desc[cpu].cpu_impl = impl_id; cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } part_id = CPU_PART(midr); for (i = 0; &cpu_partsp[i] != NULL; i++) { if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) { cpu_desc[cpu].cpu_part_num = part_id; cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name; break; } } cpu_desc[cpu].cpu_revision = CPU_REV(midr); cpu_desc[cpu].cpu_variant = CPU_VAR(midr); /* Save affinity for current CPU */ cpu_desc[cpu].mpidr = get_mpidr(); CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK; cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1); cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1); cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1); cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1); cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1); cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1); cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1); cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1); cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1); if (cpu != 0) { /* * This code must run on one cpu at a time, but we are * not scheduling on the current core so implement a * simple spinlock. 
*/ while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0) __asm __volatile("wfe" ::: "memory"); switch (cpu_aff_levels) { case 0: if (CPU_AFF0(cpu_desc[cpu].mpidr) != CPU_AFF0(cpu_desc[0].mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(cpu_desc[cpu].mpidr) != CPU_AFF1(cpu_desc[0].mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(cpu_desc[cpu].mpidr) != CPU_AFF2(cpu_desc[0].mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(cpu_desc[cpu].mpidr) != CPU_AFF3(cpu_desc[0].mpidr)) cpu_aff_levels = 4; break; } if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0) cpu_print_regs |= PRINT_ID_AA64_AFR0; if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1) cpu_print_regs |= PRINT_ID_AA64_AFR1; if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0) cpu_print_regs |= PRINT_ID_AA64_DFR0; if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1) cpu_print_regs |= PRINT_ID_AA64_DFR1; if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0) cpu_print_regs |= PRINT_ID_AA64_ISAR0; if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1) cpu_print_regs |= PRINT_ID_AA64_ISAR1; if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0) cpu_print_regs |= PRINT_ID_AA64_MMFR0; if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1) cpu_print_regs |= PRINT_ID_AA64_MMFR1; if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2) cpu_print_regs |= PRINT_ID_AA64_MMFR2; if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0) cpu_print_regs |= PRINT_ID_AA64_PFR0; if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1) cpu_print_regs |= PRINT_ID_AA64_PFR1; /* Wake up the other CPUs */ atomic_store_rel_int(&ident_lock, 0); __asm __volatile("sev" ::: "memory"); } } Index: head/sys/arm64/arm64/locore.S =================================================================== --- head/sys/arm64/arm64/locore.S (revision 343003) +++ head/sys/arm64/arm64/locore.S (revision 343004) @@ -1,701 +1,719 @@ /*- * Copyright (c) 2012-2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "assym.inc" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #define VIRT_BITS 48 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT) .globl kernbase .set kernbase, KERNBASE #define DEVICE_MEM 0 #define NORMAL_UNCACHED 1 #define NORMAL_MEM 2 /* * We assume: * MMU on with an identity map, or off * D-Cache: off * I-Cache: on or off * We are loaded at a 2MiB aligned address */ .text .globl _start _start: /* Drop to EL1 */ bl drop_to_el1 /* * Disable the MMU. We may have entered the kernel with it on and * will need to update the tables later. If this has been set up * with anything other than a VA == PA map then this will fail, * but in this case the code to find where we are running from * would have also failed. */ dsb sy mrs x2, sctlr_el1 bic x2, x2, SCTLR_M msr sctlr_el1, x2 isb /* Set the context id */ msr contextidr_el1, xzr /* Get the virt -> phys offset */ bl get_virt_delta /* * At this point: * x29 = PA - VA * x28 = Our physical load address */ /* Create the page tables */ bl create_pagetables /* * At this point: * x27 = TTBR0 table * x26 = Kernel L1 table * x24 = TTBR1 table */ /* Enable the mmu */ bl start_mmu /* Jump to the virtual address space */ ldr x15, .Lvirtdone br x15 virtdone: /* Set up the stack */ adr x25, initstack_end mov sp, x25 sub sp, sp, #PCB_SIZE /* Zero the BSS */ ldr x15, .Lbss ldr x14, .Lend 1: str xzr, [x15], #8 cmp x15, x14 b.lo 1b /* Backup the module pointer */ mov x1, x0 /* Make the page table base a virtual address */ sub x26, x26, x29 sub x24, x24, x29 sub sp, sp, #(64 * 4) mov x0, sp /* Negate the delta so it is VA -> PA */ neg x29, x29 str x1, [x0] /* modulep */ str x26, [x0, 8] /* kern_l1pt */ str x29, [x0, 16] /* kern_delta */ str x25, [x0, 24] /* kern_stack */ str x24, [x0, 32] /* kern_l0pt */ /* trace back starts here */ mov fp, #0 /* Branch to C code */ bl initarm bl mi_startup /* We should not get here */ brk 0 .align 3 .Lvirtdone: .quad virtdone .Lbss: .quad __bss_start .Lend: .quad _end #ifdef SMP /* * mpentry(unsigned long) * * Called by a core when it is being brought online. * The data in x0 is passed straight to init_secondary. */ ENTRY(mpentry) /* Disable interrupts */ msr daifset, #2 /* Drop to EL1 */ bl drop_to_el1 /* Set the context id */ msr contextidr_el1, xzr /* Load the kernel page table */ adr x24, pagetable_l0_ttbr1 /* Load the identity page table */ adr x27, pagetable_l0_ttbr0 /* Enable the mmu */ bl start_mmu /* Jump to the virtual address space */ ldr x15, =mp_virtdone br x15 mp_virtdone: ldr x4, =secondary_stacks mov x5, #(PAGE_SIZE * KSTACK_PAGES) mul x5, x0, x5 add sp, x4, x5 b init_secondary END(mpentry) #endif /* * If we are started in EL2, configure the required hypervisor * registers and drop to EL1. */
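drop_to_el1 below starts by reading CurrentEL, which reports the exception level in bits [3:2]; the `lsr x1, x1, #2 / cmp x1, #0x2` sequence is equivalent to this C sketch:

#include <stdint.h>

/* CurrentEL holds the exception level in bits [3:2]; a result of 2
 * means the boot loader left us in EL2 and the hypervisor registers
 * must be configured before dropping to EL1. */
static inline unsigned int
current_el(uint64_t currentel_reg)
{
	return ((unsigned int)((currentel_reg >> 2) & 0x3));
}
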
*/ drop_to_el1: mrs x1, CurrentEL lsr x1, x1, #2 cmp x1, #0x2 b.eq 1f ret 1: /* Configure the Hypervisor */ mov x2, #(HCR_RW) msr hcr_el2, x2 /* Load the Virtualization Process ID Register */ mrs x2, midr_el1 msr vpidr_el2, x2 /* Load the Virtualization Multiprocess ID Register */ mrs x2, mpidr_el1 msr vmpidr_el2, x2 /* Set the bits that need to be 1 in sctlr_el1 */ ldr x2, .Lsctlr_res1 msr sctlr_el1, x2 /* Don't trap to EL2 for exceptions */ mov x2, #CPTR_RES1 msr cptr_el2, x2 /* Don't trap to EL2 for CP15 traps */ msr hstr_el2, xzr /* Enable access to the physical timers at EL1 */ mrs x2, cnthctl_el2 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) msr cnthctl_el2, x2 /* Set the counter offset to a known value */ msr cntvoff_el2, xzr /* Hypervisor trap functions */ adr x2, hyp_vectors msr vbar_el2, x2 mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h) msr spsr_el2, x2 /* Configure GICv3 CPU interface */ mrs x2, id_aa64pfr0_el1 /* Extract GIC bits from the register */ ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */ cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT) b.ne 2f mrs x2, icc_sre_el2 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from non-secure EL1 */ orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */ msr icc_sre_el2, x2 2: /* Set the address to return to our return address */ msr elr_el2, x30 isb eret .align 3 .Lsctlr_res1: .quad SCTLR_RES1 #define VECT_EMPTY \ .align 7; \ 1: b 1b .align 11 hyp_vectors: VECT_EMPTY /* Synchronous EL2t */ VECT_EMPTY /* IRQ EL2t */ VECT_EMPTY /* FIQ EL2t */ VECT_EMPTY /* Error EL2t */ VECT_EMPTY /* Synchronous EL2h */ VECT_EMPTY /* IRQ EL2h */ VECT_EMPTY /* FIQ EL2h */ VECT_EMPTY /* Error EL2h */ VECT_EMPTY /* Synchronous 64-bit EL1 */ VECT_EMPTY /* IRQ 64-bit EL1 */ VECT_EMPTY /* FIQ 64-bit EL1 */ VECT_EMPTY /* Error 64-bit EL1 */ VECT_EMPTY /* Synchronous 32-bit EL1 */ VECT_EMPTY /* IRQ 32-bit EL1 */ VECT_EMPTY /* FIQ 32-bit EL1 */ VECT_EMPTY /* Error 32-bit EL1 */ /* * Get the delta between the physical address we were loaded to and the * virtual address we expect to run from. This is used when building the * initial page table. */ get_virt_delta: /* Load the physical address of virt_map */ adr x29, virt_map /* Load the virtual address of virt_map stored in virt_map */ ldr x28, [x29] /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */ sub x29, x29, x28 /* Find the load address for the kernel */ mov x28, #(KERNBASE) add x28, x28, x29 ret .align 3 virt_map: .quad virt_map /* * This builds the page tables containing the identity map, and the kernel * virtual map. * * It relies on: * We were loaded to an address that is on a 2MiB boundary * All the memory must not cross a 1GiB boundary * x28 contains the physical address we were loaded from * * TODO: This is out of date. * There are at least 5 pages before that address for the page tables * The pages used are: * - The Kernel L2 table * - The Kernel L1 table * - The Kernel L0 table (TTBR1) * - The identity (PA = VA) L1 table * - The identity (PA = VA) L0 table (TTBR0) * - The DMAP L1 tables */ create_pagetables: /* Save the Link register */ mov x5, x30 /* Clean the page table */ adr x6, pagetable mov x26, x6 adr x27, pagetable_end 1: stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 cmp x6, x27 b.lo 1b /* * Build the TTBR1 maps.
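 *
 * (Editor's sketch, informative only: the helpers below emit ARMv8
 * long-descriptor entries.  In rough C, one 2 MiB block entry written
 * by build_l2_block_pagetable is
 *	desc = L2_BLOCK | ATTR_AF | ATTR_IDX(memtype) | (pa & ~L2_OFFSET);
 *	l2[(va >> L2_SHIFT) & Ln_ADDR_MASK] = desc;
 * with ATTR_SH(ATTR_SH_IS) also OR-ed in under SMP, matching the
 * lsl/orr sequences in the assembly.)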
*/ /* Find the size of the kernel */ mov x6, #(KERNBASE) /* Find modulep - begin */ sub x8, x0, x6 /* Add two 2MiB pages for the module data and round up */ ldr x7, =(3 * L2_SIZE - 1) add x8, x8, x7 /* Get the number of l2 pages to allocate, rounded down */ lsr x10, x8, #(L2_SHIFT) /* Create the kernel space L2 table */ mov x6, x26 mov x7, #NORMAL_MEM mov x8, #(KERNBASE & L2_BLOCK_MASK) mov x9, x28 bl build_l2_block_pagetable /* Move to the l1 table */ add x26, x26, #PAGE_SIZE /* Link the l1 -> l2 table */ mov x9, x6 mov x6, x26 bl link_l1_pagetable /* Move to the l0 table */ add x24, x26, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x24 mov x10, #1 bl link_l0_pagetable /* Link the DMAP tables */ ldr x8, =DMAP_MIN_ADDRESS adr x9, pagetable_dmap; mov x10, #DMAP_TABLES bl link_l0_pagetable /* * Build the TTBR0 maps. */ add x27, x24, #PAGE_SIZE mov x6, x27 /* The initial page table */ #if defined(SOCDEV_PA) && defined(SOCDEV_VA) /* Create a table for the UART */ mov x7, #DEVICE_MEM mov x8, #(SOCDEV_VA) /* VA start */ mov x9, #(SOCDEV_PA) /* PA start */ mov x10, #1 bl build_l1_block_pagetable #endif /* Create the VA = PA map */ mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */ mov x9, x27 mov x8, x9 /* VA start (== PA start) */ mov x10, #1 bl build_l1_block_pagetable /* Move to the l0 table */ add x27, x27, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x27 mov x10, #1 bl link_l0_pagetable /* Restore the Link register */ mov x30, x5 ret /* * Builds an L0 -> L1 table descriptor * * This is a link for a 512GiB block of memory with up to 1GiB regions mapped * within it by build_l1_block_pagetable. * * x6 = L0 table * x8 = Virtual Address * x9 = L1 PA (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ link_l0_pagetable: /* * Link an L0 -> L1 table entry. */ /* Find the table index */ lsr x11, x8, #L0_SHIFT and x11, x11, #L0_ADDR_MASK /* Build the L0 block entry */ mov x12, #L0_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT 1: orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret /* * Builds an L1 -> L2 table descriptor * * This is a link for a 1GiB block of memory with up to 2MiB regions mapped * within it by build_l2_block_pagetable. * * x6 = L1 table * x8 = Virtual Address * x9 = L2 PA (trashed) * x11, x12 and x13 are trashed */ link_l1_pagetable: /* * Link an L1 -> L2 table entry. */ /* Find the table index */ lsr x11, x8, #L1_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ mov x12, #L1_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] ret /* * Builds count 1 GiB page table entry * x6 = L1 table * x7 = Type (0 = Device, 1 = Normal) * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ build_l1_block_pagetable: /* * Build the L1 table entry. 
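 *
 * (Editor's worked example, informative only: with the 4 KiB granule
 * used here the L1 index is VA bits 38:30, i.e.
 *	idx = (va >> L1_SHIFT) & Ln_ADDR_MASK;
 * so va 0x40000000 selects entry 1, each entry spanning 1 GiB.)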
*/ /* Find the table index */ lsr x11, x8, #L1_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ lsl x12, x7, #2 orr x12, x12, #L1_BLOCK orr x12, x12, #(ATTR_AF) #ifdef SMP orr x12, x12, ATTR_SH(ATTR_SH_IS) #endif /* Only use the output address bits */ lsr x9, x9, #L1_SHIFT /* Set the physical address for this virtual address */ 1: orr x13, x12, x9, lsl #L1_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret /* * Builds count 2 MiB page table entry * x6 = L2 table * x7 = Type (0 = Device, 1 = Normal) * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ build_l2_block_pagetable: /* * Build the L2 table entry. */ /* Find the table index */ lsr x11, x8, #L2_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L2 block entry */ lsl x12, x7, #2 orr x12, x12, #L2_BLOCK orr x12, x12, #(ATTR_AF) #ifdef SMP orr x12, x12, ATTR_SH(ATTR_SH_IS) #endif /* Only use the output address bits */ lsr x9, x9, #L2_SHIFT /* Set the physical address for this virtual address */ 1: orr x13, x12, x9, lsl #L2_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret start_mmu: dsb sy /* Load the exception vectors */ ldr x2, =exception_vectors msr vbar_el1, x2 /* Load ttbr0 and ttbr1 */ msr ttbr0_el1, x27 msr ttbr1_el1, x24 isb /* Clear the Monitor Debug System control register */ msr mdscr_el1, xzr /* Invalidate the TLB */ tlbi vmalle1is ldr x2, mair msr mair_el1, x2 /* * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1. */ ldr x2, tcr mrs x3, id_aa64mmfr0_el1 bfi x2, x3, #32, #3 msr tcr_el1, x2 /* Setup SCTLR */ ldr x2, sctlr_set ldr x3, sctlr_clear mrs x1, sctlr_el1 bic x1, x1, x3 /* Clear the required bits */ orr x1, x1, x2 /* Set the required bits */ msr sctlr_el1, x1 isb ret .align 3 mair: .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) | \ MAIR_ATTR(MAIR_NORMAL_NC, 1) | \ MAIR_ATTR(MAIR_NORMAL_WB, 2) | \ MAIR_ATTR(MAIR_NORMAL_WT, 3) tcr: .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \ TCR_CACHE_ATTRS | TCR_SMP_ATTRS) sctlr_set: /* Bits to set */ .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \ SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \ SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \ SCTLR_M | SCTLR_CP15BEN) sctlr_clear: /* Bits to clear */ .quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \ SCTLR_ITD | SCTLR_A) .globl abort abort: b abort //.section .init_pagetable .align 12 /* 4KiB aligned */ /* * 3 initial tables (in the following order): * L2 for kernel (High addresses) * L1 for kernel * L1 for user (Low addresses) */ pagetable: .space PAGE_SIZE pagetable_l1_ttbr1: .space PAGE_SIZE pagetable_l0_ttbr1: .space PAGE_SIZE pagetable_l1_ttbr0: .space PAGE_SIZE pagetable_l0_ttbr0: .space PAGE_SIZE .globl pagetable_dmap pagetable_dmap: .space PAGE_SIZE * DMAP_TABLES pagetable_end: el2_pagetable: .space PAGE_SIZE .globl init_pt_va init_pt_va: .quad pagetable /* XXX: Keep page tables VA */ .align 4 initstack: .space (PAGE_SIZE * KSTACK_PAGES) initstack_end: ENTRY(sigcode) mov x0, sp add x0, x0, #SF_UC 1: mov x8, #SYS_sigreturn svc 0 /* sigreturn failed, exit */ mov x8, #SYS_exit svc 0 b 1b END(sigcode) /* This may be copied to the stack, keep it 16-byte aligned */ .align 3 esigcode: .data .align 3 .global szsigcode szsigcode: .quad esigcode - sigcode + +ENTRY(aarch32_sigcode) + .word 0xe1a0000d // mov r0, sp + .word 0xe2800040 // add r0, r0, #SIGF_UC + .word 0xe59f700c // 
ldr r7, [pc, #12] + .word 0xef000000 // swi #0 + .word 0xe59f7008 // ldr r7, [pc, #8] + .word 0xef000000 // swi #0 + .word 0xeafffffa // b . - 16 +END(aarch32_sigcode) + .word SYS_sigreturn + .word SYS_exit + .align 3 +aarch32_esigcode: + .data + .global sz_aarch32_sigcode +sz_aarch32_sigcode: + .quad aarch32_esigcode - aarch32_sigcode Index: head/sys/arm64/arm64/machdep.c =================================================================== --- head/sys/arm64/arm64/machdep.c (revision 343003) +++ head/sys/arm64/arm64/machdep.c (revision 343004) @@ -1,1210 +1,1229 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif enum arm64_bus arm64_bus_method = ARM64_BUS_NONE; struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; int early_boot = 1; int cold = 1; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */ int has_pan; /* * Physical address of the EFI System Table. Stashed from the metadata hints * passed into the kernel and used by the EFI code to call runtime services. 
*/ vm_paddr_t efi_systbl_phys; /* pagezero_* implementations are provided in support.S */ void pagezero_simple(void *); void pagezero_cache(void *); /* pagezero_simple is default pagezero */ void (*pagezero)(void *p) = pagezero_simple; static void pan_setup(void) { uint64_t id_aa64mmfr1; id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); if (ID_AA64MMFR1_PAN(id_aa64mmfr1) != ID_AA64MMFR1_PAN_NONE) has_pan = 1; } void pan_enable(void) { /* * The LLVM integrated assembler doesn't understand the PAN * PSTATE field. Because of this we need to manually create * the instruction in an asm block. This is equivalent to: * msr pan, #1 * * This sets the PAN bit, stopping the kernel from accessing * memory when userspace can also access it unless the kernel * uses the userspace load/store instructions. */ if (has_pan) { WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN); __asm __volatile(".inst 0xd500409f | (0x1 << 8)"); } } static void cpu_startup(void *dummy) { undef_init(); identify_cpu(); install_cpu_errata(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sp = frame->tf_sp; regs->lr = frame->tf_lr; regs->elr = frame->tf_elr; regs->spsr = frame->tf_spsr; memcpy(regs->x, frame->tf_x, sizeof(regs->x)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sp = regs->sp; frame->tf_lr = regs->lr; frame->tf_elr = regs->elr; frame->tf_spsr &= ~PSR_FLAGS; frame->tf_spsr |= regs->spsr & PSR_FLAGS; memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. */
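/*
 * (Editor's aside: this is the path that ultimately services a
 * debugger's PT_GETFPREGS request, e.g.
 *	struct fpreg r;
 *	ptrace(PT_GETFPREGS, pid, (caddr_t)&r, 0);
 * shown purely as illustrative usage.)
 */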
*/ if (td == curthread) vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called fill_fpregs while the kernel is using the VFP")); memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs, sizeof(regs->fp_q)); regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr; regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr; } else #endif memset(regs, 0, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called set_fpregs while the kernel is using the VFP")); memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q)); pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr; pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr; #endif return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: fill_dbregs"); return (EDOOFUS); } int set_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: set_dbregs"); return (EDOOFUS); } #ifdef COMPAT_FREEBSD32 int fill_regs32(struct thread *td, struct reg32 *regs) { + int i; + struct trapframe *tf; - printf("ARM64TODO: fill_regs32"); - return (EDOOFUS); + tf = td->td_frame; + for (i = 0; i < 13; i++) + regs->r[i] = tf->tf_x[i]; + regs->r_sp = tf->tf_sp; + regs->r_lr = tf->tf_lr; + regs->r_pc = tf->tf_elr; + regs->r_cpsr = tf->tf_spsr; + + return (0); } int set_regs32(struct thread *td, struct reg32 *regs) { + int i; + struct trapframe *tf; - printf("ARM64TODO: set_regs32"); - return (EDOOFUS); + tf = td->td_frame; + for (i = 0; i < 13; i++) + tf->tf_x[i] = regs->r[i]; + tf->tf_sp = regs->r_sp; + tf->tf_lr = regs->r_lr; + tf->tf_elr = regs->r_pc; + tf->tf_spsr = regs->r_cpsr; + + + return (0); } int fill_fpregs32(struct thread *td, struct fpreg32 *regs) { printf("ARM64TODO: fill_fpregs32"); return (EDOOFUS); } int set_fpregs32(struct thread *td, struct fpreg32 *regs) { printf("ARM64TODO: set_fpregs32"); return (EDOOFUS); } int fill_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: fill_dbregs32"); return (EDOOFUS); } int set_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: set_dbregs32"); return (EDOOFUS); } #endif int ptrace_set_pc(struct thread *td, u_long addr) { printf("ARM64TODO: ptrace_set_pc"); return (EDOOFUS); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); tf->tf_x[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_lr = imgp->entry_addr; tf->tf_elr = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct gpregs *)0)->gp_x); CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct reg *)0)->x); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_x[0] = 0; mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gpregs.gp_x[0] = tf->tf_x[0]; mcp->mc_gpregs.gp_spsr = tf->tf_spsr; } memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1], sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1)); mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_lr = tf->tf_lr; 
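/* (Editor's note: x0 and spsr were handled separately above so that GET_MC_CLEAR_RET can scrub the syscall return value and the carry bit that cpu_set_syscall_retval() uses as the error flag.) */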
mcp->mc_gpregs.gp_elr = tf->tf_elr; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; uint32_t spsr; spsr = mcp->mc_gpregs.gp_spsr; if ((spsr & PSR_M_MASK) != PSR_M_EL0t || (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0) return (EINVAL); memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x)); tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_lr = mcp->mc_gpregs.gp_lr; tf->tf_elr = mcp->mc_gpregs.gp_elr; tf->tf_spsr = mcp->mc_gpregs.gp_spsr; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); curpcb = curthread->td_pcb; if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. */ vfp_save_state(td, curpcb); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called get_fpcontext while the kernel is using the VFP")); KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext")); memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs, sizeof(mcp->mc_fpregs)); mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr; mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr; mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags; mcp->mc_flags |= _MC_FP_VALID; } critical_exit(); #endif } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); if ((mcp->mc_flags & _MC_FP_VALID) != 0) { curpcb = curthread->td_pcb; /* * Discard any vfp state for the current thread, we * are about to override it. */ vfp_discard(td); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called set_fpcontext while the kernel is using the VFP")); memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q, sizeof(mcp->mc_fpregs)); curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr; curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr; curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK; } critical_exit(); #endif } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "dsb sy \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { /* We should have shutdown by now, if not enter a low power sleep */ intr_disable(); while (1) { __asm __volatile("wfi"); } } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* ARM64TODO TBD */ } /* Get current clock frequency for the given CPU ID. 
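 *
 * (Editor's note: pc_clock may legitimately be unset, so callers should
 * tolerate EOPNOTSUPP.  A hypothetical in-kernel consumer:
 *	uint64_t hz;
 *	if (cpu_est_clockrate(curcpu, &hz) == 0)
 *		printf("CPU clock: %ju Hz\n", (uintmax_t)hz);
 * is shown for illustration only.)
 */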
*/ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { struct pcpu *pc; pc = pcpu_find(cpu_id); if (pc == NULL || rate == NULL) return (EINVAL); if (pc->pc_clock == 0) return (EOPNOTSUPP); *rate = pc->pc_clock; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; register_t daif; td = curthread; if (td->td_md.md_spinlock_count == 0) { daif = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = daif; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t daif; td = curthread; critical_exit(); daif = td->td_md.md_saved_daif; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(daif); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { int i; for (i = 0; i < PCB_LR; i++) pcb->pcb_x[i] = tf->tf_x[i]; pcb->pcb_x[PCB_LR] = tf->tf_lr; pcb->pcb_pc = tf->tf_elr; pcb->pcb_sp = tf->tf_sp; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack, sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ bzero(&frame, sizeof(frame)); get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack = td->td_sigstk; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ? (onstack ? SS_ONSTACK : 0) : SS_DISABLE; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. 
*/ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_x[0]= sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { struct pcpu *pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; thread0.td_pcb->pcb_fpflags = 0; thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate; thread0.td_pcb->pcb_vfpcpu = UINT_MAX; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; /* Set the base address of translation table 0. */ thread0.td_proc->p_md.md_l0addr = READ_SPECIALREG(ttbr0_el1); } typedef struct { uint32_t type; uint64_t phys_start; uint64_t virt_start; uint64_t num_pages; uint64_t attr; } EFI_MEMORY_DESCRIPTOR; typedef void (*efi_map_entry_cb)(struct efi_md *); static void foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb) { struct efi_md *map, *p; size_t efisz; int ndesc, i; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { cb(p); } } static void exclude_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: arm_physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE, EXFLAG_NOALLOC); } } static void exclude_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, exclude_efi_map_entry); } static void add_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_RT_DATA: /* * Runtime data will be excluded after the DMAP * region is created to stop it from being added * to phys_avail. */ case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. 
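 *
 * (Editor's note: the map walk above advances by the firmware-reported
 * efihdr->descriptor_size rather than sizeof(struct efi_md), i.e.
 *	p = (struct efi_md *)((uint8_t *)p + efihdr->descriptor_size);
 * because UEFI may append vendor fields to each descriptor.)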
*/ arm_physmem_hardware_region(p->md_phys, p->md_pages * PAGE_SIZE); break; } } static void add_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, add_efi_map_entry); } static void print_efi_map_entry(struct efi_md *p) { const char *type; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode", "PersistentMemory" }; if (p->md_type < nitems(types)) type = types[p->md_type]; else type = ""; printf("%23s %012lx %12p %08lx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_NV) printf("NV "); if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE) printf("MORE_RELIABLE "); if (p->md_attr & EFI_MD_ATTR_RO) printf("RO "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } static void print_efi_map_entries(struct efi_map_header *efihdr) { printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); foreach_efi_map_entry(efihdr, print_efi_map_entry); } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static bool bus_probe(void) { bool has_acpi, has_fdt; char *order, *env; has_acpi = has_fdt = false; #ifdef FDT has_fdt = (OF_peer(0) != 0); #endif #ifdef DEV_ACPI has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0); #endif env = kern_getenv("kern.cfg.order"); if (env != NULL) { order = env; while (order != NULL) { if (has_acpi && strncmp(order, "acpi", 4) == 0 && (order[4] == ',' || order[4] == '\0')) { arm64_bus_method = ARM64_BUS_ACPI; break; } if (has_fdt && strncmp(order, "fdt", 3) == 0 && (order[3] == ',' || order[3] == '\0')) { arm64_bus_method = ARM64_BUS_FDT; break; } order = strchr(order, ','); } freeenv(env); /* If we set the bus method it is valid */ if (arm64_bus_method != ARM64_BUS_NONE) return (true); } /* If no order or an invalid order was set use the default */ if (arm64_bus_method == ARM64_BUS_NONE) { if (has_fdt) arm64_bus_method = ARM64_BUS_FDT; else if (has_acpi) arm64_bus_method = ARM64_BUS_ACPI; } /* * If no option was set the default is valid, otherwise we are * setting one to get cninit() working, then calling panic to tell * the user about the invalid bus setup. 
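 *
 * (Editor's example, hypothetical value: the tunable is a
 * comma-separated preference list, e.g. in loader.conf(5)
 *	kern.cfg.order="acpi,fdt"
 * which makes ACPI win whenever both an ACPI SPCR table and an FDT
 * are present.)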
*/ return (env == NULL); } static void cache_setup(void) { int dcache_line_shift, icache_line_shift, dczva_line_shift; uint32_t ctr_el0; uint32_t dczid_el0; ctr_el0 = READ_SPECIALREG(ctr_el0); /* Read the log2 words in each D cache line */ dcache_line_shift = CTR_DLINE_SIZE(ctr_el0); /* Get the D cache line size */ dcache_line_size = sizeof(int) << dcache_line_shift; /* And the same for the I cache */ icache_line_shift = CTR_ILINE_SIZE(ctr_el0); icache_line_size = sizeof(int) << icache_line_shift; idcache_line_size = MIN(dcache_line_size, icache_line_size); dczid_el0 = READ_SPECIALREG(dczid_el0); /* Check if dc zva is not prohibited */ if (dczid_el0 & DCZID_DZP) dczva_line_size = 0; else { /* Same as with above calculations */ dczva_line_shift = DCZID_BS_SIZE(dczid_el0); dczva_line_size = sizeof(int) << dczva_line_shift; /* Change pagezero function */ pagezero = pagezero_cache; } } void initarm(struct arm64_bootparams *abp) { struct efi_fb *efifb; struct efi_map_header *efihdr; struct pcpu *pcpup; char *env; #ifdef FDT struct mem_region mem_regions[FDT_MEM_REGIONS]; int mem_regions_sz; #endif vm_offset_t lastaddr; caddr_t kmdp; bool valid; /* Set the module data location */ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0); link_elf_ireloc(kmdp); #ifdef FDT try_load_dtb(kmdp); #endif efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t); /* Find the address to start allocating from */ lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); /* Load the physical memory ranges */ efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) add_efi_map_entries(efihdr); #ifdef FDT else { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); } if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); #endif /* Exclude the EFI framebuffer from our view of physical memory. */ efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); if (efifb != NULL) arm_physmem_exclude_region(efifb->fb_addr, efifb->fb_size, EXFLAG_NOALLOC); /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. 
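 *
 * (Editor's note: after this, per-CPU access is a plain register read;
 * machine/pcpu.h's get_pcpu() is essentially
 *	__asm __volatile("mov %0, x18" : "=&r"(pcpup));
 * with tpidr_el1 serving only to reload x18 on entry from userspace.)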
*/ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); cache_setup(); pan_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt, KERNBASE - abp->kern_delta, lastaddr - KERNBASE); /* Exclude entries needed in the DMAP region, but not phys_avail */ if (efihdr != NULL) exclude_efi_map_entries(efihdr); arm_physmem_init_kernel_globals(); devmap_bootstrap(0, NULL); valid = bus_probe(); cninit(); if (!valid) panic("Invalid bus configuration: %s", kern_getenv("kern.cfg.order")); init_proc0(abp->kern_stack); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); dbg_init(); kdb_init(); pan_enable(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (boothowto & RB_VERBOSE) { print_efi_map_entries(efihdr); arm_physmem_print_tables(); } early_boot = 0; } void dbg_init(void) { /* Clear OS lock */ WRITE_SPECIALREG(OSLAR_EL1, 0); /* This permits DDB to use debug registers for watchpoints. */ dbg_monitor_init(); /* TODO: Eventually will need to initialize debug registers here. */ } #ifdef DDB #include DB_SHOW_COMMAND(specialregs, db_show_spregs) { #define PRINT_REG(reg) \ db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg)) PRINT_REG(actlr_el1); PRINT_REG(afsr0_el1); PRINT_REG(afsr1_el1); PRINT_REG(aidr_el1); PRINT_REG(amair_el1); PRINT_REG(ccsidr_el1); PRINT_REG(clidr_el1); PRINT_REG(contextidr_el1); PRINT_REG(cpacr_el1); PRINT_REG(csselr_el1); PRINT_REG(ctr_el0); PRINT_REG(currentel); PRINT_REG(daif); PRINT_REG(dczid_el0); PRINT_REG(elr_el1); PRINT_REG(esr_el1); PRINT_REG(far_el1); #if 0 /* ARM64TODO: Enable VFP before reading floating-point registers */ PRINT_REG(fpcr); PRINT_REG(fpsr); #endif PRINT_REG(id_aa64afr0_el1); PRINT_REG(id_aa64afr1_el1); PRINT_REG(id_aa64dfr0_el1); PRINT_REG(id_aa64dfr1_el1); PRINT_REG(id_aa64isar0_el1); PRINT_REG(id_aa64isar1_el1); PRINT_REG(id_aa64pfr0_el1); PRINT_REG(id_aa64pfr1_el1); PRINT_REG(id_afr0_el1); PRINT_REG(id_dfr0_el1); PRINT_REG(id_isar0_el1); PRINT_REG(id_isar1_el1); PRINT_REG(id_isar2_el1); PRINT_REG(id_isar3_el1); PRINT_REG(id_isar4_el1); PRINT_REG(id_isar5_el1); PRINT_REG(id_mmfr0_el1); PRINT_REG(id_mmfr1_el1); PRINT_REG(id_mmfr2_el1); PRINT_REG(id_mmfr3_el1); #if 0 /* Missing from llvm */ PRINT_REG(id_mmfr4_el1); #endif PRINT_REG(id_pfr0_el1); PRINT_REG(id_pfr1_el1); PRINT_REG(isr_el1); PRINT_REG(mair_el1); PRINT_REG(midr_el1); PRINT_REG(mpidr_el1); PRINT_REG(mvfr0_el1); PRINT_REG(mvfr1_el1); PRINT_REG(mvfr2_el1); PRINT_REG(revidr_el1); PRINT_REG(sctlr_el1); PRINT_REG(sp_el0); PRINT_REG(spsel); PRINT_REG(spsr_el1); PRINT_REG(tcr_el1); PRINT_REG(tpidr_el0); PRINT_REG(tpidr_el1); PRINT_REG(tpidrro_el0); PRINT_REG(ttbr0_el1); PRINT_REG(ttbr1_el1); PRINT_REG(vbar_el1); #undef PRINT_REG } DB_SHOW_COMMAND(vtop, db_show_vtop) { uint64_t phys; if (have_addr) { phys = arm64_address_translate_s1e1r(addr); db_printf("EL1 physical address reg (read): 0x%016lx\n", phys); phys = arm64_address_translate_s1e1w(addr); db_printf("EL1 physical address reg (write): 0x%016lx\n", phys); phys = arm64_address_translate_s1e0r(addr); db_printf("EL0 physical address reg (read): 0x%016lx\n", phys); phys = arm64_address_translate_s1e0w(addr); db_printf("EL0 physical address reg (write): 0x%016lx\n", phys); } else db_printf("show vtop \n"); } #endif Index: head/sys/arm64/arm64/undefined.c =================================================================== ---
head/sys/arm64/arm64/undefined.c (revision 343003) +++ head/sys/arm64/arm64/undefined.c (revision 343004) @@ -1,142 +1,177 @@ /*- * Copyright (c) 2017 Andrew Turner * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include +#include +#include +#include #include #include +#include MALLOC_DEFINE(M_UNDEF, "undefhandler", "Undefined instruction handler data"); struct undef_handler { LIST_ENTRY(undef_handler) uh_link; undef_handler_t uh_handler; }; /* * Create two undefined instruction handler lists, one for userspace, one for * the kernel. This allows us to handle instructions that will trap */ LIST_HEAD(, undef_handler) undef_handlers[2]; /* * Work around a bug in QEMU prior to 2.5.1 where reading unknown ID * registers would raise an exception when they should return 0. */ static int id_aa64mmfr2_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame, uint32_t esr) { int reg; #define MRS_ID_AA64MMFR2_EL0_MASK (MRS_MASK | 0x000fffe0) #define MRS_ID_AA64MMFR2_EL0_VALUE (MRS_VALUE | 0x00080740) /* mrs xn, id_aa64mmfr2_el1 */ if ((insn & MRS_ID_AA64MMFR2_EL0_MASK) == MRS_ID_AA64MMFR2_EL0_VALUE) { reg = MRS_REGISTER(insn); frame->tf_elr += INSN_SIZE; if (reg < nitems(frame->tf_x)) { frame->tf_x[reg] = 0; } else if (reg == 30) { frame->tf_lr = 0; } /* If reg is 31 then write to xzr, i.e.
do nothing */ return (1); } return (0); } +#ifdef COMPAT_FREEBSD32 +/* arm32 GDB breakpoints */ +#define GDB_BREAKPOINT 0xe6000011 +#define GDB5_BREAKPOINT 0xe7ffdefe +static int +gdb_trapper(vm_offset_t va, uint32_t insn, struct trapframe *frame, + uint32_t esr) +{ + struct thread *td = curthread; + + if (insn == GDB_BREAKPOINT || insn == GDB5_BREAKPOINT) { + if (SV_PROC_FLAG(td->td_proc, SV_ILP32) && + va < VM_MAXUSER_ADDRESS) { + ksiginfo_t ksi; + + ksiginfo_init_trap(&ksi); + ksi.ksi_signo = SIGTRAP; + ksi.ksi_code = TRAP_TRACE; + ksi.ksi_addr = (void *)va; + trapsignal(td, &ksi); + return 1; + } + } + return 0; +} +#endif + void undef_init(void) { LIST_INIT(&undef_handlers[0]); LIST_INIT(&undef_handlers[1]); install_undef_handler(false, id_aa64mmfr2_handler); +#ifdef COMPAT_FREEBSD32 + install_undef_handler(true, gdb_trapper); +#endif } void * install_undef_handler(bool user, undef_handler_t func) { struct undef_handler *uh; uh = malloc(sizeof(*uh), M_UNDEF, M_WAITOK); uh->uh_handler = func; LIST_INSERT_HEAD(&undef_handlers[user ? 0 : 1], uh, uh_link); return (uh); } void remove_undef_handler(void *handle) { struct undef_handler *uh; uh = handle; LIST_REMOVE(uh, uh_link); free(handle, M_UNDEF); } int undef_insn(u_int el, struct trapframe *frame) { struct undef_handler *uh; uint32_t insn; int ret; KASSERT(el < 2, ("Invalid exception level %u", el)); if (el == 0) { ret = fueword32((uint32_t *)frame->tf_elr, &insn); if (ret != 0) panic("Unable to read userspace faulting instruction"); } else { insn = *(uint32_t *)frame->tf_elr; } LIST_FOREACH(uh, &undef_handlers[el], uh_link) { ret = uh->uh_handler(frame->tf_elr, insn, frame, frame->tf_esr); if (ret) return (1); } return (0); } Index: head/sys/arm64/arm64/vm_machdep.c =================================================================== --- head/sys/arm64/arm64/vm_machdep.c (revision 343003) +++ head/sys/arm64/arm64/vm_machdep.c (revision 343004) @@ -1,273 +1,288 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; if ((flags & RFPROC) == 0) return; if (td1 == curthread) { /* * Save the tpidr_el0 and the vfp state, these normally happen * in cpu_switch, but if userland changes these and then forks * this may not have happened. */ td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0); + td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0); #ifdef VFP if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0) vfp_save_state(td1, td1->td_pcb); #endif } pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); td2->td_proc->p_md.md_l0addr = vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l0); tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1); bcopy(td1->td_frame, tf, sizeof(*tf)); tf->tf_x[0] = 0; tf->tf_x[1] = 0; - tf->tf_spsr = 0; + tf->tf_spsr = td1->td_frame->tf_spsr & PSR_M_32; td2->td_frame = tf; /* Set the return value registers for fork() */ td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return; td2->td_pcb->pcb_x[9] = (uintptr_t)td2; td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame; td2->td_pcb->pcb_fpusaved = &td2->td_pcb->pcb_fpustate; td2->td_pcb->pcb_vfpcpu = UINT_MAX; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_daif = td1->td_md.md_saved_daif & ~DAIF_I_MASKED; } void cpu_reset(void) { psci_reset(); printf("cpu_reset failed"); while(1) __asm volatile("wfi" ::: "memory"); } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; switch (error) { case 0: frame->tf_x[0] = td->td_retval[0]; frame->tf_x[1] = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: frame->tf_elr -= 4; break; case EJUSTRETURN: break; default: frame->tf_spsr |= PSR_C; /* carry bit */ frame->tf_x[0] = SV_ABI_ERRNO(td->td_proc, error); break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_x[8] = (uintptr_t)fork_return; td->td_pcb->pcb_x[9] = (uintptr_t)td; td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate; td->td_pcb->pcb_vfpcpu = UINT_MAX; /* Setup to release spin count in fork_exit().
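 *
 * (Editor's note: the new thread starts life as if it held one virtual
 * spinlock, so the first spinlock_exit() it performs on the way out of
 * fork_exit() drops md_spinlock_count to 0 and restores the saved DAIF
 * value, which the assignment below leaves with DAIF_I_MASKED cleared
 * so interrupts come back on.)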
*/ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = td0->td_md.md_saved_daif & ~DAIF_I_MASKED; } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; - tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); + /* 32-bit processes use r13 for sp */ + if (td->td_frame->tf_spsr & PSR_M_32) + tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); + else + tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); tf->tf_elr = (register_t)entry; tf->tf_x[0] = (register_t)arg; } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct pcb *pcb; if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); pcb = td->td_pcb; - pcb->pcb_tpidr_el0 = (register_t)tls_base; - if (td == curthread) - WRITE_SPECIALREG(tpidr_el0, tls_base); + if (td->td_frame->tf_spsr & PSR_M_32) { + /* 32-bit ARM stores the user TLS in tpidrro */ + pcb->pcb_tpidrro_el0 = (register_t)tls_base; + pcb->pcb_tpidr_el0 = (register_t)tls_base; + if (td == curthread) { + WRITE_SPECIALREG(tpidrro_el0, tls_base); + WRITE_SPECIALREG(tpidr_el0, tls_base); + } + } else { + pcb->pcb_tpidr_el0 = (register_t)tls_base; + if (td == curthread) + WRITE_SPECIALREG(tpidr_el0, tls_base); + } return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)STACKALIGN( (struct trapframe *)td->td_pcb - 1); } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_x[8] = (uintptr_t)func; td->td_pcb->pcb_x[9] = (uintptr_t)arg; td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate; td->td_pcb->pcb_vfpcpu = UINT_MAX; } void cpu_exit(struct thread *td) { } void swi_vm(void *v) { if (busdma_swi_pending != 0) busdma_swi(); } Index: head/sys/arm64/conf/GENERIC =================================================================== --- head/sys/arm64/conf/GENERIC (revision 343003) +++ head/sys/arm64/conf/GENERIC (revision 343004) @@ -1,299 +1,299 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/arm64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES.
# # $FreeBSD$ cpu ARM64 ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC # IP (v4/v6) security options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_HHOOK # hhook(9) framework for TCP options TCP_OFFLOAD # TCP offload options TCP_RFC7413 # TCP Fast Open options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization -options COMPAT_FREEBSD32 # Incomplete, but used by cloudabi32.ko. +options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options VFP # Floating-point support options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits options SMP options INTRNG # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. #options GDB # Support remote GDB. 
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options ALT_BREAK_TO_DEBUGGER # Enter debugger on keyboard escape sequence options USB_DEBUG # enable debug msgs options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Warning: KUBSAN can result in a kernel too large for loader to load #options KUBSAN # Kernel Undefined Behavior Sanitizer #options KCOV # Kernel Coverage Sanitizer # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options NETDUMP # netdump(4) client support # SoC support options SOC_ALLWINNER_A64 options SOC_ALLWINNER_H5 options SOC_CAVM_THUNDERX options SOC_HISI_HI6220 options SOC_BRCM_BCM2837 options SOC_MARVELL_8K options SOC_ROCKCHIP_RK3328 options SOC_ROCKCHIP_RK3399 options SOC_XILINX_ZYNQ # Timer drivers device a10_timer # Annapurna Alpine drivers device al_ccu # Alpine Cache Coherency Unit device al_nb_service # Alpine North Bridge Service device al_iofic # I/O Fabric Interrupt Controller device al_serdes # Serializer/Deserializer device al_udma # Universal DMA # Qualcomm Snapdragon drivers device qcom_gcc # Global Clock Controller # VirtIO support device virtio device virtio_pci device virtio_mmio device virtio_blk device vtnet # CPU frequency control device cpufreq # Bus drivers device pci device al_pci # Annapurna Alpine PCI-E options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Ethernet NICs device mdio device mii device miibus # MII bus support device awg # Allwinner EMAC Gigabit Ethernet device axgbe # AMD Opteron A1100 integrated NIC device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel 10Gb Ethernet Family device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device neta # Marvell Armada 370/38x/XP/3700 NIC device smc # SMSC LAN91C111 device vnic # Cavium ThunderX NIC device al_eth # Annapurna Alpine Ethernet NIC device dwc_rk # Rockchip Designware # Block devices device ahci device scbus device da # ATA/SCSI peripherals device pass # Passthrough device (direct ATA/SCSI access) # MMC/SD/SDIO Card slot support device sdhci device sdhci_xenon # Marvell Xenon SD/MMC controller device aw_mmc # Allwinner SD/MMC controller device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards device dwmmc # Serial (COM) ports device uart # Generic UART driver device uart_msm # Qualcomm MSM UART driver device uart_mu # RPI3 aux port device uart_mvebu # Armada 3700 UART driver device uart_ns8250 # ns8250-type UART driver device uart_snps device pl011 # USB support device aw_ehci # Allwinner EHCI USB interface (USB 2.0) device aw_usbphy # Allwinner USB PHY device dwcotg # DWC OTG controller device ohci # OHCI USB interface device ehci # EHCI USB interface (USB 2.0) device ehci_mv # Marvell EHCI USB interface device xhci # XHCI PCI->USB interface (USB 3.0) device xhci_mv # Marvell XHCI USB interface device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # USB ethernet support device muge device smcphy device smsc # GPIO / PINCTRL device aw_gpio # Allwinner GPIO controller device gpio device 
gpioled device fdt_pinctrl device mv_gpio # Marvell GPIO controller device mvebu_pinctrl # Marvell Pinmux Controller # I2C device aw_rsb # Allwinner Reduced Serial Bus device bcm2835_bsc # Broadcom BCM283x I2C bus device iicbus device iic device twsi # Allwinner I2C controller device rk_i2c # RockChip I2C controller device syr827 # Silergy SYR827 PMIC # Clock and reset controllers device aw_ccu # Allwinner clock controller # Interrupt controllers device aw_nmi # Allwinner NMI support device mv_cp110_icu # Marvell CP110 ICU device mv_ap806_gicp # Marvell AP806 GICP # Real-time clock support device aw_rtc # Allwinner Real-time Clock device mv_rtc # Marvell Real-time Clock # Watchdog controllers device aw_wdog # Allwinner Watchdog # Power management controllers device axp81x # X-Powers AXP81x PMIC device rk805 # RockChip RK805 PMIC # EFUSE device aw_sid # Allwinner Secure ID EFUSE # Thermal sensors device aw_thermal # Allwinner Thermal Sensor Controller device mv_thermal # Marvell Thermal Sensor Controller # SPI device spibus device bcm2835_spi # Broadcom BCM283x SPI bus # PWM device pwm device aw_pwm # Console device vt device kbdmux device vt_efifb # EVDEV support device evdev # input event device support options EVDEV_SUPPORT # evdev support in legacy drivers device uinput # install /dev/uinput cdev # Pseudo devices. device crypto # core crypto support device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module options EFIRT # EFI Runtime Services # EXT_RESOURCES pseudo devices options EXT_RESOURCES device clk device phy device hwreset device nvmem device regulator device syscon device aw_syscon # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Chip-specific errata options THUNDERX_PASS_1_1_ERRATA options FDT device acpi # DTBs makeoptions MODULES_EXTRA="dtb/allwinner dtb/rockchip" Index: head/sys/arm64/include/armreg.h =================================================================== --- head/sys/arm64/include/armreg.h (revision 343003) +++ head/sys/arm64/include/armreg.h (revision 343004) @@ -1,663 +1,667 @@ /*- * Copyright (c) 2013, 2014 Andrew Turner * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ARMREG_H_ #define _MACHINE_ARMREG_H_ #define INSN_SIZE 4 #define READ_SPECIALREG(reg) \ ({ uint64_t val; \ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (val)); \ val; \ }) #define WRITE_SPECIALREG(reg, val) \ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)val)) /* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */ #define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */ #define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */ #define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */ #define CNTHCTL_EL1PCEN (1 << 1) /* Allow EL0/1 physical timer access */ #define CNTHCTL_EL1PCTEN (1 << 0) /* Allow EL0/1 physical counter access */ /* CPACR_EL1 */ #define CPACR_FPEN_MASK (0x3 << 20) #define CPACR_FPEN_TRAP_ALL1 (0x0 << 20) /* Traps from EL0 and EL1 */ #define CPACR_FPEN_TRAP_EL0 (0x1 << 20) /* Traps from EL0 */ #define CPACR_FPEN_TRAP_ALL2 (0x2 << 20) /* Traps from EL0 and EL1 */ #define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */ #define CPACR_TTA (0x1 << 28) /* CTR_EL0 - Cache Type Register */ #define CTR_DLINE_SHIFT 16 #define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT) #define CTR_DLINE_SIZE(reg) (((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT) #define CTR_ILINE_SHIFT 0 #define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT) #define CTR_ILINE_SIZE(reg) (((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT) /* DAIF - Interrupt Mask Bits */ #define DAIF_D_MASKED (1 << 9) #define DAIF_A_MASKED (1 << 8) #define DAIF_I_MASKED (1 << 7) #define DAIF_F_MASKED (1 << 6) /* DCZID_EL0 - Data Cache Zero ID register */ #define DCZID_DZP (1 << 4) /* DC ZVA prohibited if non-0 */ #define DCZID_BS_SHIFT 0 #define DCZID_BS_MASK (0xf << DCZID_BS_SHIFT) #define DCZID_BS_SIZE(reg) (((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT) /* ESR_ELx */ #define ESR_ELx_ISS_MASK 0x00ffffff #define ISS_INSN_FnV (0x01 << 10) #define ISS_INSN_EA (0x01 << 9) #define ISS_INSN_S1PTW (0x01 << 7) #define ISS_INSN_IFSC_MASK (0x1f << 0) #define ISS_DATA_ISV (0x01 << 24) #define ISS_DATA_SAS_MASK (0x03 << 22) #define ISS_DATA_SSE (0x01 << 21) #define ISS_DATA_SRT_MASK (0x1f << 16) #define ISS_DATA_SF (0x01 << 15) #define ISS_DATA_AR (0x01 << 14) #define ISS_DATA_FnV (0x01 << 10) #define ISS_DATA_EA (0x01 << 9) #define ISS_DATA_CM (0x01 << 8) #define ISS_DATA_S1PTW (0x01 << 7) #define ISS_DATA_WnR (0x01 << 6) #define ISS_DATA_DFSC_MASK (0x3f << 0) #define ISS_DATA_DFSC_ASF_L0 (0x00 << 0) #define ISS_DATA_DFSC_ASF_L1 (0x01 << 0) #define ISS_DATA_DFSC_ASF_L2 (0x02 << 0) #define ISS_DATA_DFSC_ASF_L3 (0x03 << 0) #define ISS_DATA_DFSC_TF_L0 (0x04 << 0) #define ISS_DATA_DFSC_TF_L1 (0x05 << 0) #define ISS_DATA_DFSC_TF_L2 (0x06 << 0) #define ISS_DATA_DFSC_TF_L3 (0x07 << 0) #define ISS_DATA_DFSC_AFF_L1 (0x09 << 0) #define ISS_DATA_DFSC_AFF_L2 (0x0a << 0) #define ISS_DATA_DFSC_AFF_L3 (0x0b << 0) #define ISS_DATA_DFSC_PF_L1 (0x0d << 0) #define ISS_DATA_DFSC_PF_L2 (0x0e << 0) #define ISS_DATA_DFSC_PF_L3 (0x0f << 0) #define ISS_DATA_DFSC_EXT (0x10 << 0) #define
ISS_DATA_DFSC_EXT_L0 (0x14 << 0) #define ISS_DATA_DFSC_EXT_L1 (0x15 << 0) #define ISS_DATA_DFSC_EXT_L2 (0x16 << 0) #define ISS_DATA_DFSC_EXT_L3 (0x17 << 0) #define ISS_DATA_DFSC_ECC (0x18 << 0) #define ISS_DATA_DFSC_ECC_L0 (0x1c << 0) #define ISS_DATA_DFSC_ECC_L1 (0x1d << 0) #define ISS_DATA_DFSC_ECC_L2 (0x1e << 0) #define ISS_DATA_DFSC_ECC_L3 (0x1f << 0) #define ISS_DATA_DFSC_ALIGN (0x21 << 0) #define ISS_DATA_DFSC_TLB_CONFLICT (0x30 << 0) #define ESR_ELx_IL (0x01 << 25) #define ESR_ELx_EC_SHIFT 26 #define ESR_ELx_EC_MASK (0x3f << 26) #define ESR_ELx_EXCEPTION(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT) #define EXCP_UNKNOWN 0x00 /* Unknown exception */ #define EXCP_FP_SIMD 0x07 /* VFP/SIMD trap */ #define EXCP_ILL_STATE 0x0e /* Illegal execution state */ #define EXCP_SVC32 0x11 /* SVC trap for AArch32 */ #define EXCP_SVC64 0x15 /* SVC trap for AArch64 */ #define EXCP_MSR 0x18 /* MSR/MRS trap */ #define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */ #define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */ #define EXCP_PC_ALIGN 0x22 /* PC alignment fault */ #define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */ #define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */ #define EXCP_SP_ALIGN 0x26 /* SP alignment fault */ #define EXCP_TRAP_FP 0x2c /* Trapped FP exception */ #define EXCP_SERROR 0x2f /* SError interrupt */ #define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */ #define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */ #define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */ #define EXCP_BRK 0x3c /* Breakpoint */ /* ICC_CTLR_EL1 */ #define ICC_CTLR_EL1_EOIMODE (1U << 1) /* ICC_IAR1_EL1 */ #define ICC_IAR1_EL1_SPUR (0x03ff) /* ICC_IGRPEN0_EL1 */ #define ICC_IGRPEN0_EL1_EN (1U << 0) /* ICC_PMR_EL1 */ #define ICC_PMR_EL1_PRIO_MASK (0xFFUL) /* ICC_SGI1R_EL1 */ #define ICC_SGI1R_EL1_TL_MASK 0xffffUL #define ICC_SGI1R_EL1_AFF1_SHIFT 16 #define ICC_SGI1R_EL1_SGIID_SHIFT 24 #define ICC_SGI1R_EL1_AFF2_SHIFT 32 #define ICC_SGI1R_EL1_AFF3_SHIFT 48 #define ICC_SGI1R_EL1_SGIID_MASK 0xfUL #define ICC_SGI1R_EL1_IRM (0x1UL << 40) /* ICC_SRE_EL1 */ #define ICC_SRE_EL1_SRE (1U << 0) /* ICC_SRE_EL2 */ #define ICC_SRE_EL2_SRE (1U << 0) #define ICC_SRE_EL2_EN (1U << 3) /* ID_AA64DFR0_EL1 */ #define ID_AA64DFR0_MASK 0x0000000ff0f0fffful #define ID_AA64DFR0_DEBUG_VER_SHIFT 0 #define ID_AA64DFR0_DEBUG_VER_MASK (0xf << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_DEBUG_VER(x) ((x) & ID_AA64DFR0_DEBUG_VER_MASK) #define ID_AA64DFR0_DEBUG_VER_8 (0x6 << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_DEBUG_VER_8_VHE (0x7 << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_DEBUG_VER_8_2 (0x8 << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER_SHIFT 4 #define ID_AA64DFR0_TRACE_VER_MASK (0xf << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER(x) ((x) & ID_AA64DFR0_TRACE_VER_MASK) #define ID_AA64DFR0_TRACE_VER_NONE (0x0 << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER_IMPL (0x1 << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_SHIFT 8 #define ID_AA64DFR0_PMU_VER_MASK (0xf << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER(x) ((x) & ID_AA64DFR0_PMU_VER_MASK) #define ID_AA64DFR0_PMU_VER_NONE (0x0 << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_3 (0x1 << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_3_1 (0x4 << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_IMPL (0xf << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_BRPS_SHIFT 12 #define ID_AA64DFR0_BRPS_MASK (0xf <<
ID_AA64DFR0_BRPS_SHIFT) #define ID_AA64DFR0_BRPS(x) \ ((((x) >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) + 1) #define ID_AA64DFR0_WRPS_SHIFT 20 #define ID_AA64DFR0_WRPS_MASK (0xf << ID_AA64DFR0_WRPS_SHIFT) #define ID_AA64DFR0_WRPS(x) \ ((((x) >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) + 1) #define ID_AA64DFR0_CTX_CMPS_SHIFT 28 #define ID_AA64DFR0_CTX_CMPS_MASK (0xf << ID_AA64DFR0_CTX_CMPS_SHIFT) #define ID_AA64DFR0_CTX_CMPS(x) \ ((((x) >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) + 1) #define ID_AA64DFR0_PMS_VER_SHIFT 32 #define ID_AA64DFR0_PMS_VER_MASK (0xful << ID_AA64DFR0_PMS_VER_SHIFT) #define ID_AA64DFR0_PMS_VER(x) ((x) & ID_AA64DFR0_PMS_VER_MASK) #define ID_AA64DFR0_PMS_VER_NONE (0x0ul << ID_AA64DFR0_PMS_VER_SHIFT) #define ID_AA64DFR0_PMS_VER_V1 (0x1ul << ID_AA64DFR0_PMS_VER_SHIFT) /* ID_AA64ISAR0_EL1 */ #define ID_AA64ISAR0_MASK 0x0000fffff0fffff0ul #define ID_AA64ISAR0_AES_SHIFT 4 #define ID_AA64ISAR0_AES_MASK (0xf << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES(x) ((x) & ID_AA64ISAR0_AES_MASK) #define ID_AA64ISAR0_AES_NONE (0x0 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES_BASE (0x1 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES_PMULL (0x2 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_SHA1_SHIFT 8 #define ID_AA64ISAR0_SHA1_MASK (0xf << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA1(x) ((x) & ID_AA64ISAR0_SHA1_MASK) #define ID_AA64ISAR0_SHA1_NONE (0x0 << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA1_BASE (0x1 << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA2_SHIFT 12 #define ID_AA64ISAR0_SHA2_MASK (0xf << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_SHA2(x) ((x) & ID_AA64ISAR0_SHA2_MASK) #define ID_AA64ISAR0_SHA2_NONE (0x0 << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_SHA2_BASE (0x1 << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_SHA2_512 (0x2 << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_CRC32_SHIFT 16 #define ID_AA64ISAR0_CRC32_MASK (0xf << ID_AA64ISAR0_CRC32_SHIFT) #define ID_AA64ISAR0_CRC32(x) ((x) & ID_AA64ISAR0_CRC32_MASK) #define ID_AA64ISAR0_CRC32_NONE (0x0 << ID_AA64ISAR0_CRC32_SHIFT) #define ID_AA64ISAR0_CRC32_BASE (0x1 << ID_AA64ISAR0_CRC32_SHIFT) #define ID_AA64ISAR0_ATOMIC_SHIFT 20 #define ID_AA64ISAR0_ATOMIC_MASK (0xf << ID_AA64ISAR0_ATOMIC_SHIFT) #define ID_AA64ISAR0_ATOMIC(x) ((x) & ID_AA64ISAR0_ATOMIC_MASK) #define ID_AA64ISAR0_ATOMIC_NONE (0x0 << ID_AA64ISAR0_ATOMIC_SHIFT) #define ID_AA64ISAR0_ATOMIC_IMPL (0x2 << ID_AA64ISAR0_ATOMIC_SHIFT) #define ID_AA64ISAR0_RDM_SHIFT 28 #define ID_AA64ISAR0_RDM_MASK (0xf << ID_AA64ISAR0_RDM_SHIFT) #define ID_AA64ISAR0_RDM(x) ((x) & ID_AA64ISAR0_RDM_MASK) #define ID_AA64ISAR0_RDM_NONE (0x0 << ID_AA64ISAR0_RDM_SHIFT) #define ID_AA64ISAR0_RDM_IMPL (0x1 << ID_AA64ISAR0_RDM_SHIFT) #define ID_AA64ISAR0_SHA3_SHIFT 32 #define ID_AA64ISAR0_SHA3_MASK (0xful << ID_AA64ISAR0_SHA3_SHIFT) #define ID_AA64ISAR0_SHA3(x) ((x) & ID_AA64ISAR0_SHA3_MASK) #define ID_AA64ISAR0_SHA3_NONE (0x0ul << ID_AA64ISAR0_SHA3_SHIFT) #define ID_AA64ISAR0_SHA3_IMPL (0x1ul << ID_AA64ISAR0_SHA3_SHIFT) #define ID_AA64ISAR0_SM3_SHIFT 36 #define ID_AA64ISAR0_SM3_MASK (0xful << ID_AA64ISAR0_SM3_SHIFT) #define ID_AA64ISAR0_SM3(x) ((x) & ID_AA64ISAR0_SM3_MASK) #define ID_AA64ISAR0_SM3_NONE (0x0ul << ID_AA64ISAR0_SM3_SHIFT) #define ID_AA64ISAR0_SM3_IMPL (0x1ul << ID_AA64ISAR0_SM3_SHIFT) #define ID_AA64ISAR0_SM4_SHIFT 40 #define ID_AA64ISAR0_SM4_MASK (0xful << ID_AA64ISAR0_SM4_SHIFT) #define ID_AA64ISAR0_SM4(x) ((x) & ID_AA64ISAR0_SM4_MASK) #define ID_AA64ISAR0_SM4_NONE (0x0ul << ID_AA64ISAR0_SM4_SHIFT) #define ID_AA64ISAR0_SM4_IMPL (0x1ul << 
ID_AA64ISAR0_SM4_SHIFT) #define ID_AA64ISAR0_DP_SHIFT 44 #define ID_AA64ISAR0_DP_MASK (0xful << ID_AA64ISAR0_DP_SHIFT) #define ID_AA64ISAR0_DP(x) ((x) & ID_AA64ISAR0_DP_MASK) #define ID_AA64ISAR0_DP_NONE (0x0ul << ID_AA64ISAR0_DP_SHIFT) #define ID_AA64ISAR0_DP_IMPL (0x1ul << ID_AA64ISAR0_DP_SHIFT) /* ID_AA64ISAR1_EL1 */ #define ID_AA64ISAR1_MASK 0xffffffff #define ID_AA64ISAR1_DPB_SHIFT 0 #define ID_AA64ISAR1_DPB_MASK (0xf << ID_AA64ISAR1_DPB_SHIFT) #define ID_AA64ISAR1_DPB(x) ((x) & ID_AA64ISAR1_DPB_MASK) #define ID_AA64ISAR1_DPB_NONE (0x0 << ID_AA64ISAR1_DPB_SHIFT) #define ID_AA64ISAR1_DPB_IMPL (0x1 << ID_AA64ISAR1_DPB_SHIFT) #define ID_AA64ISAR1_APA_SHIFT 4 #define ID_AA64ISAR1_APA_MASK (0xf << ID_AA64ISAR1_APA_SHIFT) #define ID_AA64ISAR1_APA(x) ((x) & ID_AA64ISAR1_APA_MASK) #define ID_AA64ISAR1_APA_NONE (0x0 << ID_AA64ISAR1_APA_SHIFT) #define ID_AA64ISAR1_APA_IMPL (0x1 << ID_AA64ISAR1_APA_SHIFT) #define ID_AA64ISAR1_API_SHIFT 8 #define ID_AA64ISAR1_API_MASK (0xf << ID_AA64ISAR1_API_SHIFT) #define ID_AA64ISAR1_API(x) ((x) & ID_AA64ISAR1_API_MASK) #define ID_AA64ISAR1_API_NONE (0x0 << ID_AA64ISAR1_API_SHIFT) #define ID_AA64ISAR1_API_IMPL (0x1 << ID_AA64ISAR1_API_SHIFT) #define ID_AA64ISAR1_JSCVT_SHIFT 12 #define ID_AA64ISAR1_JSCVT_MASK (0xf << ID_AA64ISAR1_JSCVT_SHIFT) #define ID_AA64ISAR1_JSCVT(x) ((x) & ID_AA64ISAR1_JSCVT_MASK) #define ID_AA64ISAR1_JSCVT_NONE (0x0 << ID_AA64ISAR1_JSCVT_SHIFT) #define ID_AA64ISAR1_JSCVT_IMPL (0x1 << ID_AA64ISAR1_JSCVT_SHIFT) #define ID_AA64ISAR1_FCMA_SHIFT 16 #define ID_AA64ISAR1_FCMA_MASK (0xf << ID_AA64ISAR1_FCMA_SHIFT) #define ID_AA64ISAR1_FCMA(x) ((x) & ID_AA64ISAR1_FCMA_MASK) #define ID_AA64ISAR1_FCMA_NONE (0x0 << ID_AA64ISAR1_FCMA_SHIFT) #define ID_AA64ISAR1_FCMA_IMPL (0x1 << ID_AA64ISAR1_FCMA_SHIFT) #define ID_AA64ISAR1_LRCPC_SHIFT 20 #define ID_AA64ISAR1_LRCPC_MASK (0xf << ID_AA64ISAR1_LRCPC_SHIFT) #define ID_AA64ISAR1_LRCPC(x) ((x) & ID_AA64ISAR1_LRCPC_MASK) #define ID_AA64ISAR1_LRCPC_NONE (0x0 << ID_AA64ISAR1_LRCPC_SHIFT) #define ID_AA64ISAR1_LRCPC_IMPL (0x1 << ID_AA64ISAR1_LRCPC_SHIFT) #define ID_AA64ISAR1_GPA_SHIFT 24 #define ID_AA64ISAR1_GPA_MASK (0xf << ID_AA64ISAR1_GPA_SHIFT) #define ID_AA64ISAR1_GPA(x) ((x) & ID_AA64ISAR1_GPA_MASK) #define ID_AA64ISAR1_GPA_NONE (0x0 << ID_AA64ISAR1_GPA_SHIFT) #define ID_AA64ISAR1_GPA_IMPL (0x1 << ID_AA64ISAR1_GPA_SHIFT) #define ID_AA64ISAR1_GPI_SHIFT 28 #define ID_AA64ISAR1_GPI_MASK (0xf << ID_AA64ISAR1_GPI_SHIFT) #define ID_AA64ISAR1_GPI(x) ((x) & ID_AA64ISAR1_GPI_MASK) #define ID_AA64ISAR1_GPI_NONE (0x0 << ID_AA64ISAR1_GPI_SHIFT) #define ID_AA64ISAR1_GPI_IMPL (0x1 << ID_AA64ISAR1_GPI_SHIFT) /* ID_AA64MMFR0_EL1 */ #define ID_AA64MMFR0_MASK 0xffffffff #define ID_AA64MMFR0_PA_RANGE_SHIFT 0 #define ID_AA64MMFR0_PA_RANGE_MASK (0xf << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE(x) ((x) & ID_AA64MMFR0_PA_RANGE_MASK) #define ID_AA64MMFR0_PA_RANGE_4G (0x0 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_64G (0x1 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_1T (0x2 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_4T (0x3 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_16T (0x4 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_256T (0x5 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_4P (0x6 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_ASID_BITS_SHIFT 4 #define ID_AA64MMFR0_ASID_BITS_MASK (0xf << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_ASID_BITS(x) ((x) & ID_AA64MMFR0_ASID_BITS_MASK) #define 
ID_AA64MMFR0_ASID_BITS_8 (0x0 << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_ASID_BITS_16 (0x2 << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_BIGEND_SHIFT 8 #define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_BIGEND(x) ((x) & ID_AA64MMFR0_BIGEND_MASK) #define ID_AA64MMFR0_BIGEND_FIXED (0x0 << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_BIGEND_MIXED (0x1 << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_S_NS_MEM_SHIFT 12 #define ID_AA64MMFR0_S_NS_MEM_MASK (0xf << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_S_NS_MEM(x) ((x) & ID_AA64MMFR0_S_NS_MEM_MASK) #define ID_AA64MMFR0_S_NS_MEM_NONE (0x0 << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_S_NS_MEM_DISTINCT (0x1 << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0_SHIFT 16 #define ID_AA64MMFR0_BIGEND_EL0_MASK (0xf << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0(x) ((x) & ID_AA64MMFR0_BIGEND_EL0_MASK) #define ID_AA64MMFR0_BIGEND_EL0_FIXED (0x0 << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0_MIXED (0x1 << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_TGRAN16_SHIFT 20 #define ID_AA64MMFR0_TGRAN16_MASK (0xf << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN16(x) ((x) & ID_AA64MMFR0_TGRAN16_MASK) #define ID_AA64MMFR0_TGRAN16_NONE (0x0 << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN16_IMPL (0x1 << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN64_SHIFT 24 #define ID_AA64MMFR0_TGRAN64_MASK (0xf << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN64(x) ((x) & ID_AA64MMFR0_TGRAN64_MASK) #define ID_AA64MMFR0_TGRAN64_IMPL (0x0 << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN64_NONE (0xf << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN4_SHIFT 28 #define ID_AA64MMFR0_TGRAN4_MASK (0xf << ID_AA64MMFR0_TGRAN4_SHIFT) #define ID_AA64MMFR0_TGRAN4(x) ((x) & ID_AA64MMFR0_TGRAN4_MASK) #define ID_AA64MMFR0_TGRAN4_IMPL (0x0 << ID_AA64MMFR0_TGRAN4_SHIFT) #define ID_AA64MMFR0_TGRAN4_NONE (0xf << ID_AA64MMFR0_TGRAN4_SHIFT) /* ID_AA64MMFR1_EL1 */ #define ID_AA64MMFR1_MASK 0xffffffff #define ID_AA64MMFR1_HAFDBS_SHIFT 0 #define ID_AA64MMFR1_HAFDBS_MASK (0xf << ID_AA64MMFR1_HAFDBS_SHIFT) #define ID_AA64MMFR1_HAFDBS(x) ((x) & ID_AA64MMFR1_HAFDBS_MASK) #define ID_AA64MMFR1_HAFDBS_NONE (0x0 << ID_AA64MMFR1_HAFDBS_SHIFT) #define ID_AA64MMFR1_HAFDBS_AF (0x1 << ID_AA64MMFR1_HAFDBS_SHIFT) #define ID_AA64MMFR1_HAFDBS_AF_DBS (0x2 << ID_AA64MMFR1_HAFDBS_SHIFT) #define ID_AA64MMFR1_VMIDBITS_SHIFT 4 #define ID_AA64MMFR1_VMIDBITS_MASK (0xf << ID_AA64MMFR1_VMIDBITS_SHIFT) #define ID_AA64MMFR1_VMIDBITS(x) ((x) & ID_AA64MMFR1_VMIDBITS_MASK) #define ID_AA64MMFR1_VMIDBITS_8 (0x0 << ID_AA64MMFR1_VMIDBITS_SHIFT) #define ID_AA64MMFR1_VMIDBITS_16 (0x2 << ID_AA64MMFR1_VMIDBITS_SHIFT) #define ID_AA64MMFR1_VH_SHIFT 8 #define ID_AA64MMFR1_VH_MASK (0xf << ID_AA64MMFR1_VH_SHIFT) #define ID_AA64MMFR1_VH(x) ((x) & ID_AA64MMFR1_VH_MASK) #define ID_AA64MMFR1_VH_NONE (0x0 << ID_AA64MMFR1_VH_SHIFT) #define ID_AA64MMFR1_VH_IMPL (0x1 << ID_AA64MMFR1_VH_SHIFT) #define ID_AA64MMFR1_HPDS_SHIFT 12 #define ID_AA64MMFR1_HPDS_MASK (0xf << ID_AA64MMFR1_HPDS_SHIFT) #define ID_AA64MMFR1_HPDS(x) ((x) & ID_AA64MMFR1_HPDS_MASK) #define ID_AA64MMFR1_HPDS_NONE (0x0 << ID_AA64MMFR1_HPDS_SHIFT) #define ID_AA64MMFR1_HPDS_HPD (0x1 << ID_AA64MMFR1_HPDS_SHIFT) #define ID_AA64MMFR1_HPDS_TTPBHA (0x2 << ID_AA64MMFR1_HPDS_SHIFT) #define ID_AA64MMFR1_LO_SHIFT 16 #define ID_AA64MMFR1_LO_MASK (0xf << ID_AA64MMFR1_LO_SHIFT) #define ID_AA64MMFR1_LO(x) ((x) & 
ID_AA64MMFR1_LO_MASK) #define ID_AA64MMFR1_LO_NONE (0x0 << ID_AA64MMFR1_LO_SHIFT) #define ID_AA64MMFR1_LO_IMPL (0x1 << ID_AA64MMFR1_LO_SHIFT) #define ID_AA64MMFR1_PAN_SHIFT 20 #define ID_AA64MMFR1_PAN_MASK (0xf << ID_AA64MMFR1_PAN_SHIFT) #define ID_AA64MMFR1_PAN(x) ((x) & ID_AA64MMFR1_PAN_MASK) #define ID_AA64MMFR1_PAN_NONE (0x0 << ID_AA64MMFR1_PAN_SHIFT) #define ID_AA64MMFR1_PAN_IMPL (0x1 << ID_AA64MMFR1_PAN_SHIFT) #define ID_AA64MMFR1_PAN_ATS1E1 (0x2 << ID_AA64MMFR1_PAN_SHIFT) #define ID_AA64MMFR1_SPEC_SEI_SHIFT 24 #define ID_AA64MMFR1_SPEC_SEI_MASK (0xf << ID_AA64MMFR1_SPEC_SEI_SHIFT) #define ID_AA64MMFR1_SPEC_SEI(x) ((x) & ID_AA64MMFR1_SPEC_SEI_MASK) #define ID_AA64MMFR1_SPEC_SEI_NONE (0x0 << ID_AA64MMFR1_SPEC_SEI_SHIFT) #define ID_AA64MMFR1_SPEC_SEI_IMPL (0x1 << ID_AA64MMFR1_SPEC_SEI_SHIFT) #define ID_AA64MMFR1_XNX_SHIFT 28 #define ID_AA64MMFR1_XNX_MASK (0xf << ID_AA64MMFR1_XNX_SHIFT) #define ID_AA64MMFR1_XNX(x) ((x) & ID_AA64MMFR1_XNX_MASK) #define ID_AA64MMFR1_XNX_NONE (0x0 << ID_AA64MMFR1_XNX_SHIFT) #define ID_AA64MMFR1_XNX_IMPL (0x1 << ID_AA64MMFR1_XNX_SHIFT) /* ID_AA64MMFR2_EL1 */ #define ID_AA64MMFR2_EL1 S3_0_C0_C7_2 #define ID_AA64MMFR2_MASK 0x0fffffff #define ID_AA64MMFR2_CNP_SHIFT 0 #define ID_AA64MMFR2_CNP_MASK (0xf << ID_AA64MMFR2_CNP_SHIFT) #define ID_AA64MMFR2_CNP(x) ((x) & ID_AA64MMFR2_CNP_MASK) #define ID_AA64MMFR2_CNP_NONE (0x0 << ID_AA64MMFR2_CNP_SHIFT) #define ID_AA64MMFR2_CNP_IMPL (0x1 << ID_AA64MMFR2_CNP_SHIFT) #define ID_AA64MMFR2_UAO_SHIFT 4 #define ID_AA64MMFR2_UAO_MASK (0xf << ID_AA64MMFR2_UAO_SHIFT) #define ID_AA64MMFR2_UAO(x) ((x) & ID_AA64MMFR2_UAO_MASK) #define ID_AA64MMFR2_UAO_NONE (0x0 << ID_AA64MMFR2_UAO_SHIFT) #define ID_AA64MMFR2_UAO_IMPL (0x1 << ID_AA64MMFR2_UAO_SHIFT) #define ID_AA64MMFR2_LSM_SHIFT 8 #define ID_AA64MMFR2_LSM_MASK (0xf << ID_AA64MMFR2_LSM_SHIFT) #define ID_AA64MMFR2_LSM(x) ((x) & ID_AA64MMFR2_LSM_MASK) #define ID_AA64MMFR2_LSM_NONE (0x0 << ID_AA64MMFR2_LSM_SHIFT) #define ID_AA64MMFR2_LSM_IMPL (0x1 << ID_AA64MMFR2_LSM_SHIFT) #define ID_AA64MMFR2_IESB_SHIFT 12 #define ID_AA64MMFR2_IESB_MASK (0xf << ID_AA64MMFR2_IESB_SHIFT) #define ID_AA64MMFR2_IESB(x) ((x) & ID_AA64MMFR2_IESB_MASK) #define ID_AA64MMFR2_IESB_NONE (0x0 << ID_AA64MMFR2_IESB_SHIFT) #define ID_AA64MMFR2_IESB_IMPL (0x1 << ID_AA64MMFR2_IESB_SHIFT) #define ID_AA64MMFR2_VA_RANGE_SHIFT 16 #define ID_AA64MMFR2_VA_RANGE_MASK (0xf << ID_AA64MMFR2_VA_RANGE_SHIFT) #define ID_AA64MMFR2_VA_RANGE(x) ((x) & ID_AA64MMFR2_VA_RANGE_MASK) #define ID_AA64MMFR2_VA_RANGE_48 (0x0 << ID_AA64MMFR2_VA_RANGE_SHIFT) #define ID_AA64MMFR2_VA_RANGE_52 (0x1 << ID_AA64MMFR2_VA_RANGE_SHIFT) #define ID_AA64MMFR2_CCIDX_SHIFT 20 #define ID_AA64MMFR2_CCIDX_MASK (0xf << ID_AA64MMFR2_CCIDX_SHIFT) #define ID_AA64MMFR2_CCIDX(x) ((x) & ID_AA64MMFR2_CCIDX_MASK) #define ID_AA64MMFR2_CCIDX_32 (0x0 << ID_AA64MMFR2_CCIDX_SHIFT) #define ID_AA64MMFR2_CCIDX_64 (0x1 << ID_AA64MMFR2_CCIDX_SHIFT) #define ID_AA64MMFR2_NV_SHIFT 24 #define ID_AA64MMFR2_NV_MASK (0xf << ID_AA64MMFR2_NV_SHIFT) #define ID_AA64MMFR2_NV(x) ((x) & ID_AA64MMFR2_NV_MASK) #define ID_AA64MMFR2_NV_NONE (0x0 << ID_AA64MMFR2_NV_SHIFT) #define ID_AA64MMFR2_NV_IMPL (0x1 << ID_AA64MMFR2_NV_SHIFT) /* ID_AA64PFR0_EL1 */ #define ID_AA64PFR0_MASK 0x0000000ffffffffful #define ID_AA64PFR0_EL0_SHIFT 0 #define ID_AA64PFR0_EL0_MASK (0xf << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL0(x) ((x) & ID_AA64PFR0_EL0_MASK) #define ID_AA64PFR0_EL0_64 (1 << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL0_64_32 (2 << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL1_SHIFT 4 #define 
ID_AA64PFR0_EL1_MASK (0xf << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL1(x) ((x) & ID_AA64PFR0_EL1_MASK) #define ID_AA64PFR0_EL1_64 (1 << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL1_64_32 (2 << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL2_SHIFT 8 #define ID_AA64PFR0_EL2_MASK (0xf << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2(x) ((x) & ID_AA64PFR0_EL2_MASK) #define ID_AA64PFR0_EL2_NONE (0 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2_64 (1 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2_64_32 (2 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL3_SHIFT 12 #define ID_AA64PFR0_EL3_MASK (0xf << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3(x) ((x) & ID_AA64PFR0_EL3_MASK) #define ID_AA64PFR0_EL3_NONE (0 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3_64 (1 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3_64_32 (2 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_FP_SHIFT 16 #define ID_AA64PFR0_FP_MASK (0xf << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_FP(x) ((x) & ID_AA64PFR0_FP_MASK) #define ID_AA64PFR0_FP_IMPL (0x0 << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_FP_HP (0x1 << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_FP_NONE (0xf << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_ADV_SIMD_SHIFT 20 #define ID_AA64PFR0_ADV_SIMD_MASK (0xf << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_ADV_SIMD(x) ((x) & ID_AA64PFR0_ADV_SIMD_MASK) #define ID_AA64PFR0_ADV_SIMD_IMPL (0x0 << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_ADV_SIMD_HP (0x1 << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_ADV_SIMD_NONE (0xf << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_GIC_BITS 0x4 /* Number of bits in GIC field */ #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_GIC_MASK (0xf << ID_AA64PFR0_GIC_SHIFT) #define ID_AA64PFR0_GIC(x) ((x) & ID_AA64PFR0_GIC_MASK) #define ID_AA64PFR0_GIC_CPUIF_NONE (0x0 << ID_AA64PFR0_GIC_SHIFT) #define ID_AA64PFR0_GIC_CPUIF_EN (0x1 << ID_AA64PFR0_GIC_SHIFT) #define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_RAS_MASK (0xf << ID_AA64PFR0_RAS_SHIFT) #define ID_AA64PFR0_RAS(x) ((x) & ID_AA64PFR0_RAS_MASK) #define ID_AA64PFR0_RAS_NONE (0x0 << ID_AA64PFR0_RAS_SHIFT) #define ID_AA64PFR0_RAS_V1 (0x1 << ID_AA64PFR0_RAS_SHIFT) #define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_SVE_MASK (0xful << ID_AA64PFR0_SVE_SHIFT) #define ID_AA64PFR0_SVE(x) ((x) & ID_AA64PFR0_SVE_MASK) #define ID_AA64PFR0_SVE_NONE (0x0ul << ID_AA64PFR0_SVE_SHIFT) #define ID_AA64PFR0_SVE_IMPL (0x1ul << ID_AA64PFR0_SVE_SHIFT)
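/*
 * Editor's sketch, not part of this diff: the ID_AA64PFR0 accessors above
 * mask each field in place, so results compare directly against the
 * shifted _NONE/_IMPL/_64_32 constants.  A minimal feature probe built
 * from them (the helper name is hypothetical):
 */
#include <sys/types.h>
#include <machine/armreg.h>

/* Hypothetical helper: does this CPU implement FP and Advanced SIMD? */
static inline bool
cpu_has_fp_simd(void)
{
	uint64_t pfr0;

	pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
	return (ID_AA64PFR0_FP(pfr0) != ID_AA64PFR0_FP_NONE &&
	    ID_AA64PFR0_ADV_SIMD(pfr0) != ID_AA64PFR0_ADV_SIMD_NONE);
}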
/* MAIR_EL1 - Memory Attribute Indirection Register */ #define MAIR_ATTR_MASK(idx) (0xff << ((idx) * 8)) #define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8)) #define MAIR_DEVICE_nGnRnE 0x00 #define MAIR_NORMAL_NC 0x44 #define MAIR_NORMAL_WT 0xbb #define MAIR_NORMAL_WB 0xff /* PAR_EL1 - Physical Address Register */ #define PAR_F_SHIFT 0 #define PAR_F (0x1 << PAR_F_SHIFT) #define PAR_SUCCESS(x) (((x) & PAR_F) == 0) /* When PAR_F == 0 (success) */ #define PAR_SH_SHIFT 7 #define PAR_SH_MASK (0x3 << PAR_SH_SHIFT) #define PAR_NS_SHIFT 9 #define PAR_NS_MASK (0x3 << PAR_NS_SHIFT) #define PAR_PA_SHIFT 12 #define PAR_PA_MASK 0x0000fffffffff000 #define PAR_ATTR_SHIFT 56 #define PAR_ATTR_MASK (0xfful << PAR_ATTR_SHIFT) /* When PAR_F == 1 (aborted) */ #define PAR_FST_SHIFT 1 #define PAR_FST_MASK (0x3f << PAR_FST_SHIFT) #define PAR_PTW_SHIFT 8 #define PAR_PTW_MASK (0x1 << PAR_PTW_SHIFT) #define PAR_S_SHIFT 9 #define PAR_S_MASK (0x1 << PAR_S_SHIFT) /* SCTLR_EL1 - System Control Register */ #define SCTLR_RES0 0xc8222440 /* Reserved ARMv8.0, write 0 */ #define SCTLR_RES1 0x30d00800 /* Reserved ARMv8.0, write 1 */ #define SCTLR_M 0x00000001 #define SCTLR_A 0x00000002 #define SCTLR_C 0x00000004 #define SCTLR_SA 0x00000008 #define SCTLR_SA0 0x00000010 #define SCTLR_CP15BEN 0x00000020 /* Bit 6 is reserved */ #define SCTLR_ITD 0x00000080 #define SCTLR_SED 0x00000100 #define SCTLR_UMA 0x00000200 /* Bit 10 is reserved */ /* Bit 11 is reserved */ #define SCTLR_I 0x00001000 #define SCTLR_EnDB 0x00002000 /* ARMv8.3 */ #define SCTLR_DZE 0x00004000 #define SCTLR_UCT 0x00008000 #define SCTLR_nTWI 0x00010000 /* Bit 17 is reserved */ #define SCTLR_nTWE 0x00040000 #define SCTLR_WXN 0x00080000 /* Bit 20 is reserved */ #define SCTLR_IESB 0x00200000 /* ARMv8.2 */ /* Bit 22 is reserved */ #define SCTLR_SPAN 0x00800000 /* ARMv8.1 */ #define SCTLR_EOE 0x01000000 #define SCTLR_EE 0x02000000 #define SCTLR_UCI 0x04000000 #define SCTLR_EnDA 0x08000000 /* ARMv8.3 */ #define SCTLR_nTLSMD 0x10000000 /* ARMv8.2 */ #define SCTLR_LSMAOE 0x20000000 /* ARMv8.2 */ #define SCTLR_EnIB 0x40000000 /* ARMv8.3 */ #define SCTLR_EnIA 0x80000000 /* ARMv8.3 */ /* SPSR_EL1 */ /* * When the exception is taken in AArch64: * M[3:2] is the exception level * M[1] is unused * M[0] is the SP select: * 0: always SP0 * 1: current EL's SP */ #define PSR_M_EL0t 0x00000000 #define PSR_M_EL1t 0x00000004 #define PSR_M_EL1h 0x00000005 #define PSR_M_EL2t 0x00000008 #define PSR_M_EL2h 0x00000009 +#define PSR_M_64 0x00000000 +#define PSR_M_32 0x00000010 #define PSR_M_MASK 0x0000000f + +#define PSR_T 0x00000020 #define PSR_AARCH32 0x00000010 #define PSR_F 0x00000040 #define PSR_I 0x00000080 #define PSR_A 0x00000100 #define PSR_D 0x00000200 #define PSR_IL 0x00100000 #define PSR_SS 0x00200000 #define PSR_V 0x10000000 #define PSR_C 0x20000000 #define PSR_Z 0x40000000 #define PSR_N 0x80000000 #define PSR_FLAGS 0xf0000000
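/*
 * Editor's sketch, not from the diff: PSR_M_32 (the AArch32 execution
 * state bit, M[4]) and PSR_T (the Thumb bit) are what the new compat
 * code keys off when it inspects a saved SPSR.  Helper names here are
 * hypothetical; struct trapframe is defined in machine/frame.h below.
 */
#include <sys/types.h>
#include <machine/armreg.h>
#include <machine/frame.h>

/* True when the exception was taken from an AArch32 context. */
static inline bool
spsr_is_aarch32(const struct trapframe *tf)
{
	return ((tf->tf_spsr & PSR_M_32) != 0);
}

/* True when that AArch32 context was executing Thumb code. */
static inline bool
spsr_is_thumb(const struct trapframe *tf)
{
	return ((tf->tf_spsr & (PSR_M_32 | PSR_T)) == (PSR_M_32 | PSR_T));
}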
/* TCR_EL1 - Translation Control Register */ #define TCR_ASID_16 (1 << 36) #define TCR_IPS_SHIFT 32 #define TCR_IPS_32BIT (0 << TCR_IPS_SHIFT) #define TCR_IPS_36BIT (1 << TCR_IPS_SHIFT) #define TCR_IPS_40BIT (2 << TCR_IPS_SHIFT) #define TCR_IPS_42BIT (3 << TCR_IPS_SHIFT) #define TCR_IPS_44BIT (4 << TCR_IPS_SHIFT) #define TCR_IPS_48BIT (5 << TCR_IPS_SHIFT) #define TCR_TG1_SHIFT 30 #define TCR_TG1_16K (1 << TCR_TG1_SHIFT) #define TCR_TG1_4K (2 << TCR_TG1_SHIFT) #define TCR_TG1_64K (3 << TCR_TG1_SHIFT) #define TCR_SH1_SHIFT 28 #define TCR_SH1_IS (0x3UL << TCR_SH1_SHIFT) #define TCR_ORGN1_SHIFT 26 #define TCR_ORGN1_WBWA (0x1UL << TCR_ORGN1_SHIFT) #define TCR_IRGN1_SHIFT 24 #define TCR_IRGN1_WBWA (0x1UL << TCR_IRGN1_SHIFT) #define TCR_SH0_SHIFT 12 #define TCR_SH0_IS (0x3UL << TCR_SH0_SHIFT) #define TCR_ORGN0_SHIFT 10 #define TCR_ORGN0_WBWA (0x1UL << TCR_ORGN0_SHIFT) #define TCR_IRGN0_SHIFT 8 #define TCR_IRGN0_WBWA (0x1UL << TCR_IRGN0_SHIFT) #define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\ (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)) #ifdef SMP #define TCR_SMP_ATTRS (TCR_SH0_IS | TCR_SH1_IS) #else #define TCR_SMP_ATTRS 0 #endif #define TCR_T1SZ_SHIFT 16 #define TCR_T0SZ_SHIFT 0 #define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT) #define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT) #define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x)) /* Saved Program Status Register */ #define DBG_SPSR_SS (0x1 << 21) /* Monitor Debug System Control Register */ #define DBG_MDSCR_SS (0x1 << 0) #define DBG_MDSCR_KDE (0x1 << 13) #define DBG_MDSCR_MDE (0x1 << 15) /* Performance Monitoring Counters */ #define PMCR_E (1 << 0) /* Enable all counters */ #define PMCR_P (1 << 1) /* Reset all counters */ #define PMCR_C (1 << 2) /* Clock counter reset */ #define PMCR_D (1 << 3) /* CNTR counts every 64 clk cycles */ #define PMCR_X (1 << 4) /* Export to ext. monitoring (ETM) */ #define PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */ #define PMCR_LC (1 << 6) /* Long cycle count enable */ #define PMCR_IMP_SHIFT 24 /* Implementer code */ #define PMCR_IMP_MASK (0xff << PMCR_IMP_SHIFT) #define PMCR_IDCODE_SHIFT 16 /* Identification code */ #define PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT) #define PMCR_IDCODE_CORTEX_A57 0x01 #define PMCR_IDCODE_CORTEX_A72 0x02 #define PMCR_IDCODE_CORTEX_A53 0x03 #define PMCR_N_SHIFT 11 /* Number of counters implemented */ #define PMCR_N_MASK (0x1f << PMCR_N_SHIFT) #endif /* !_MACHINE_ARMREG_H_ */ Index: head/sys/arm64/include/elf.h =================================================================== --- head/sys/arm64/include/elf.h (revision 343003) +++ head/sys/arm64/include/elf.h (revision 343004) @@ -1,86 +1,90 @@ /*- * Copyright (c) 1996-1997 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ELF_H_ #define _MACHINE_ELF_H_ /* * ELF definitions for the AArch64 architecture. */ #include <sys/elf32.h> /* Definitions common to all 32 bit architectures. */ #include <sys/elf64.h> /* Definitions common to all 64 bit architectures. */ #ifndef __ELF_WORD_SIZE #define __ELF_WORD_SIZE 64 /* Used by <sys/elf_generic.h> */ #endif #include <sys/elf_generic.h> /* * Auxiliary vector entries for passing information to the interpreter. */ typedef struct { /* Auxiliary vector entry on initial stack */ int a_type; /* Entry type. */ union { int a_val; /* Integer value. */ } a_un; } Elf32_Auxinfo; typedef struct { /* Auxiliary vector entry on initial stack */ long a_type; /* Entry type. */ union { long a_val; /* Integer value. */ void *a_ptr; /* Address. */ void (*a_fcn)(void); /* Function pointer (not used). */ } a_un; } Elf64_Auxinfo; __ElfType(Auxinfo);
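/*
 * Editor's note (illustrative, not in the tree): both auxinfo layouts are
 * a type word followed by a value union, so a 32-bit entry is 8 bytes and
 * a 64-bit entry 16.  Assuming an LP64 kernel, this can be checked at
 * compile time:
 */
#include <machine/elf.h>

_Static_assert(sizeof(Elf32_Auxinfo) == 8, "auxv entries for 32-bit processes are two 32-bit words");
_Static_assert(sizeof(Elf64_Auxinfo) == 16, "auxv entries for 64-bit processes are two 64-bit words");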
#define ELF_ARCH EM_AARCH64 #define ELF_MACHINE_OK(x) ((x) == (ELF_ARCH)) /* Define "machine" characteristics */ #if __ELF_WORD_SIZE == 64 #define ELF_TARG_CLASS ELFCLASS64 #define ELF_TARG_DATA ELFDATA2LSB #define ELF_TARG_MACH EM_AARCH64 #define ELF_TARG_VER 1 #else #define ELF_TARG_CLASS ELFCLASS32 #define ELF_TARG_DATA ELFDATA2LSB #define ELF_TARG_MACH EM_ARM #define ELF_TARG_VER 1 #endif +#if __ELF_WORD_SIZE == 32 +#define ET_DYN_LOAD_ADDR 0x12000 +#else #define ET_DYN_LOAD_ADDR 0x100000 +#endif #endif /* !_MACHINE_ELF_H_ */ Index: head/sys/arm64/include/frame.h =================================================================== --- head/sys/arm64/include/frame.h (revision 343003) +++ head/sys/arm64/include/frame.h (revision 343004) @@ -1,75 +1,83 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_FRAME_H_ #define _MACHINE_FRAME_H_ #ifndef LOCORE #include <sys/signal.h> #include <sys/ucontext.h> /* * NOTE: keep this structure in sync with struct reg and struct mcontext. */ struct trapframe { uint64_t tf_sp; uint64_t tf_lr; uint64_t tf_elr; uint32_t tf_spsr; uint32_t tf_esr; uint64_t tf_x[30]; }; struct arm64_frame { struct arm64_frame *f_frame; u_long f_retaddr; }; /* * Signal frame, pushed onto the user stack. */ struct sigframe { siginfo_t sf_si; /* actual saved siginfo */ ucontext_t sf_uc; /* actual saved ucontext */ }; /* * There is no fixed frame layout, other than to be 16-byte aligned. */ struct frame { int dummy; }; +#ifdef COMPAT_FREEBSD32 +struct sigframe32 { + struct siginfo32 sf_si; + ucontext32_t sf_uc; + mcontext32_vfp_t sf_vfp; +}; +#endif /* COMPAT_FREEBSD32 */ + #endif /* !LOCORE */ #endif /* !_MACHINE_FRAME_H_ */ Index: head/sys/arm64/include/reg.h =================================================================== --- head/sys/arm64/include/reg.h (revision 343003) +++ head/sys/arm64/include/reg.h (revision 343004) @@ -1,88 +1,92 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014-2015 The FreeBSD Foundation * All rights reserved.
* * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_REG_H_ #define _MACHINE_REG_H_ struct reg { uint64_t x[30]; uint64_t lr; uint64_t sp; uint64_t elr; uint32_t spsr; }; struct reg32 { - int dummy; + unsigned int r[13]; + unsigned int r_sp; + unsigned int r_lr; + unsigned int r_pc; + unsigned int r_cpsr; }; struct fpreg { __uint128_t fp_q[32]; uint32_t fp_sr; uint32_t fp_cr; }; struct fpreg32 { int dummy; }; struct dbreg { int dummy; }; struct dbreg32 { int dummy; }; #define __HAVE_REG32 #ifdef _KERNEL /* * XXX these interfaces are MI, so they should be declared in a MI place. */ int fill_regs(struct thread *, struct reg *); int set_regs(struct thread *, struct reg *); int fill_fpregs(struct thread *, struct fpreg *); int set_fpregs(struct thread *, struct fpreg *); int fill_dbregs(struct thread *, struct dbreg *); int set_dbregs(struct thread *, struct dbreg *); #ifdef COMPAT_FREEBSD32 int fill_regs32(struct thread *, struct reg32 *); int set_regs32(struct thread *, struct reg32 *); int fill_fpregs32(struct thread *, struct fpreg32 *); int set_fpregs32(struct thread *, struct fpreg32 *); int fill_dbregs32(struct thread *, struct dbreg32 *); int set_dbregs32(struct thread *, struct dbreg32 *); #endif #endif #endif /* !_MACHINE_REG_H_ */ Index: head/sys/arm64/include/ucontext.h =================================================================== --- head/sys/arm64/include/ucontext.h (revision 343003) +++ head/sys/arm64/include/ucontext.h (revision 343004) @@ -1,64 +1,90 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014-2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_UCONTEXT_H_ #define _MACHINE_UCONTEXT_H_ struct gpregs { __register_t gp_x[30]; __register_t gp_lr; __register_t gp_sp; __register_t gp_elr; __uint32_t gp_spsr; int gp_pad; }; struct fpregs { __uint128_t fp_q[32]; __uint32_t fp_sr; __uint32_t fp_cr; int fp_flags; int fp_pad; }; struct __mcontext { struct gpregs mc_gpregs; struct fpregs mc_fpregs; int mc_flags; #define _MC_FP_VALID 0x1 /* Set when mc_fpregs has valid data */ int mc_pad; /* Padding */ __uint64_t mc_spare[8]; /* Space for expansion, set to zero */ }; typedef struct __mcontext mcontext_t; +#ifdef COMPAT_FREEBSD32 +#include +typedef struct __mcontext32 { + uint32_t mc_gregset[17]; + uint32_t mc_vfp_size; + uint32_t mc_vfp_ptr; + uint32_t mc_spare[33]; +} mcontext32_t; + +typedef struct __ucontext32 { + sigset_t uc_sigmask; + mcontext32_t uc_mcontext; + u_int32_t uc_link; + struct sigaltstack32 uc_stack; + u_int32_t uc_flags; + u_int32_t __spare__[4]; +} ucontext32_t; + +typedef struct __mcontext32_vfp { + __uint64_t mcv_reg[32]; + __uint32_t mcv_fpscr; +} mcontext32_vfp_t; + +#endif /* COMPAT_FREEBSD32 */ + + #endif /* !_MACHINE_UCONTEXT_H_ */
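/*
 * Editor's sketch, not part of the commit: mc_vfp_size/mc_vfp_ptr describe
 * an out-of-line VFP save area (a mcontext32_vfp_t) rather than embedding
 * it in the mcontext.  The pointer is a 32-bit user VA, so kernel code
 * must copyin() it rather than dereference it; the helper below is
 * hypothetical.
 */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <machine/ucontext.h>

#ifdef COMPAT_FREEBSD32
static int
mcontext32_fetch_vfp(const mcontext32_t *mcp, mcontext32_vfp_t *vfp)
{
	if (mcp->mc_vfp_size != sizeof(*vfp) || mcp->mc_vfp_ptr == 0)
		return (EINVAL);
	return (copyin((const void *)(uintptr_t)mcp->mc_vfp_ptr, vfp,
	    sizeof(*vfp)));
}
#endif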
Index: head/sys/arm64/include/vfp.h =================================================================== --- head/sys/arm64/include/vfp.h (revision 343003) +++ head/sys/arm64/include/vfp.h (revision 343004) @@ -1,72 +1,77 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_VFP_H_ #define _MACHINE_VFP_H_ #ifndef LOCORE struct vfpstate { __uint128_t vfp_regs[32]; uint32_t vfp_fpcr; uint32_t vfp_fpsr; }; #ifdef _KERNEL struct pcb; void vfp_init(void); void vfp_discard(struct thread *); void vfp_restore_state(void); void vfp_save_state(struct thread *, struct pcb *); struct fpu_kern_ctx; /* * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread(). */ #define FPU_KERN_NORMAL 0x0000 #define FPU_KERN_NOWAIT 0x0001 #define FPU_KERN_KTHR 0x0002 #define FPU_KERN_NOCTX 0x0004 struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int); void fpu_kern_free_ctx(struct fpu_kern_ctx *); void fpu_kern_enter(struct thread *, struct fpu_kern_ctx *, u_int); int fpu_kern_leave(struct thread *, struct fpu_kern_ctx *); int fpu_kern_thread(u_int); int is_fpu_kern_thread(u_int); +/* Convert between the AArch32 FPSCR and the AArch64 FPSR/FPCR */ +#define VFP_FPSCR_FROM_SRCR(vpsr, vpcr) ((vpsr) | ((vpcr) & 0x7c00000)) +#define VFP_FPSR_FROM_FPSCR(vpscr) ((vpscr) &~ 0x7c00000) +#define VFP_FPCR_FROM_FPSCR(vpsrc) ((vpsrc) & 0x7c00000) + #endif #endif #endif /* !_MACHINE_VFP_H_ */
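/*
 * Editor's sketch: the three macros above split the single AArch32 FPSCR
 * into the AArch64 FPSR (status and trap bits) and FPCR (the 0x7c00000
 * control field: RMode, FZ, DN and AHP, bits 22-26) and merge them back.
 * The SRCR spelling in the merge macro's name is as committed.  An
 * illustrative round trip:
 */
#include <sys/types.h>
#include <machine/vfp.h>

static inline uint32_t
fpscr_roundtrip(uint32_t fpscr)
{
	uint32_t fpsr, fpcr;

	fpsr = VFP_FPSR_FROM_FPSCR(fpscr);	/* everything but the control bits */
	fpcr = VFP_FPCR_FROM_FPSCR(fpscr);	/* just the control bits */
	return (VFP_FPSCR_FROM_SRCR(fpsr, fpcr));
}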