Index: head/sys/arm/arm/db_interface.c =================================================================== --- head/sys/arm/arm/db_interface.c (revision 290272) +++ head/sys/arm/arm/db_interface.c (revision 290273) @@ -1,384 +1,326 @@ /* $NetBSD: db_interface.c,v 1.33 2003/08/25 04:51:10 mrg Exp $ */ /*- * Copyright (c) 1996 Scott K. Stevens * * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU) */ /* * Interface to new debugger. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include /* just for boothowto */ #include #ifdef KDB #include #endif #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include static int nil = 0; int db_access_und_sp (struct db_variable *, db_expr_t *, int); int db_access_abt_sp (struct db_variable *, db_expr_t *, int); int db_access_irq_sp (struct db_variable *, db_expr_t *, int); static db_varfcn_t db_frame; #define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x) struct db_variable db_regs[] = { { "spsr", DB_OFFSET(tf_spsr), db_frame }, { "r0", DB_OFFSET(tf_r0), db_frame }, { "r1", DB_OFFSET(tf_r1), db_frame }, { "r2", DB_OFFSET(tf_r2), db_frame }, { "r3", DB_OFFSET(tf_r3), db_frame }, { "r4", DB_OFFSET(tf_r4), db_frame }, { "r5", DB_OFFSET(tf_r5), db_frame }, { "r6", DB_OFFSET(tf_r6), db_frame }, { "r7", DB_OFFSET(tf_r7), db_frame }, { "r8", DB_OFFSET(tf_r8), db_frame }, { "r9", DB_OFFSET(tf_r9), db_frame }, { "r10", DB_OFFSET(tf_r10), db_frame }, { "r11", DB_OFFSET(tf_r11), db_frame }, { "r12", DB_OFFSET(tf_r12), db_frame }, { "usr_sp", DB_OFFSET(tf_usr_sp), db_frame }, { "usr_lr", DB_OFFSET(tf_usr_lr), db_frame }, { "svc_sp", DB_OFFSET(tf_svc_sp), db_frame }, { "svc_lr", DB_OFFSET(tf_svc_lr), db_frame }, { "pc", DB_OFFSET(tf_pc), db_frame }, { "und_sp", &nil, db_access_und_sp, }, { "abt_sp", &nil, db_access_abt_sp, }, { "irq_sp", &nil, db_access_irq_sp, }, }; struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); int db_access_und_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_UND32_MODE); return (1); } return (0); } int db_access_abt_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_ABT32_MODE); return (1); } return (0); } int db_access_irq_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_IRQ32_MODE); return (1); } return (0); } int db_frame(struct db_variable *vp, db_expr_t *valp, int 
rw) { int *reg; if (kdb_frame == NULL) return (0); reg = (int *)((uintptr_t)kdb_frame + (db_expr_t)vp->valuep); if (rw == DB_VAR_GET) *valp = *reg; else *reg = *valp; return (1); } void db_show_mdpcpu(struct pcpu *pc) { } int db_validate_address(vm_offset_t addr) { struct proc *p = curproc; struct pmap *pmap; if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap || #ifndef ARM32_NEW_VM_LAYOUT addr >= VM_MAXUSER_ADDRESS #else addr >= VM_MIN_KERNEL_ADDRESS #endif ) pmap = pmap_kernel(); else pmap = p->p_vmspace->vm_map.pmap; return (pmap_extract(pmap, addr) == FALSE); } /* * Read bytes from kernel address space for debugger. */ int db_read_bytes(addr, size, data) vm_offset_t addr; size_t size; char *data; { char *src = (char *)addr; if (db_validate_address((u_int)src)) { db_printf("address %p is invalid\n", src); return (-1); } if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) { *((int*)data) = *((int*)src); return (0); } if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) { *((short*)data) = *((short*)src); return (0); } while (size-- > 0) { if (db_validate_address((u_int)src)) { db_printf("address %p is invalid\n", src); return (-1); } *data++ = *src++; } return (0); } /* * Write bytes to kernel address space for debugger. */ int db_write_bytes(vm_offset_t addr, size_t size, char *data) { char *dst; size_t loop; dst = (char *)addr; if (db_validate_address((u_int)dst)) { db_printf("address %p is invalid\n", dst); return (0); } if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) *((int*)dst) = *((int*)data); else if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) *((short*)dst) = *((short*)data); else { loop = size; while (loop-- > 0) { if (db_validate_address((u_int)dst)) { db_printf("address %p is invalid\n", dst); return (-1); } *dst++ = *data++; } } /* make sure the caches and memory are in sync */ cpu_icache_sync_range(addr, size); /* In case the current page tables have been modified ... 
*/ cpu_tlb_flushID(); cpu_cpwait(); return (0); } static u_int db_fetch_reg(int reg) { switch (reg) { case 0: return (kdb_frame->tf_r0); case 1: return (kdb_frame->tf_r1); case 2: return (kdb_frame->tf_r2); case 3: return (kdb_frame->tf_r3); case 4: return (kdb_frame->tf_r4); case 5: return (kdb_frame->tf_r5); case 6: return (kdb_frame->tf_r6); case 7: return (kdb_frame->tf_r7); case 8: return (kdb_frame->tf_r8); case 9: return (kdb_frame->tf_r9); case 10: return (kdb_frame->tf_r10); case 11: return (kdb_frame->tf_r11); case 12: return (kdb_frame->tf_r12); case 13: return (kdb_frame->tf_svc_sp); case 14: return (kdb_frame->tf_svc_lr); case 15: return (kdb_frame->tf_pc); default: panic("db_fetch_reg: botch"); } } +static u_int +db_branch_taken_read_int(void *cookie __unused, vm_offset_t offset, u_int *val) +{ + u_int ret; + + db_read_bytes(offset, 4, (char *)&ret); + *val = ret; + + return (0); +} + +static u_int +db_branch_taken_fetch_reg(void *cookie __unused, int reg) +{ + + return (db_fetch_reg(reg)); +} + u_int branch_taken(u_int insn, db_addr_t pc) { - u_int addr, nregs, offset = 0; + register_t new_pc; + int ret; - switch ((insn >> 24) & 0xf) { - case 0x2: /* add pc, reg1, #value */ - case 0x0: /* add pc, reg1, reg2, lsl #offset */ - addr = db_fetch_reg((insn >> 16) & 0xf); - if (((insn >> 16) & 0xf) == 15) - addr += 8; - if (insn & 0x0200000) { - offset = (insn >> 7) & 0x1e; - offset = (insn & 0xff) << (32 - offset) | - (insn & 0xff) >> offset; - } else { + ret = arm_predict_branch(NULL, insn, (register_t)pc, &new_pc, + db_branch_taken_fetch_reg, db_branch_taken_read_int); - offset = db_fetch_reg(insn & 0x0f); - if ((insn & 0x0000ff0) != 0x00000000) { - if (insn & 0x10) - nregs = db_fetch_reg((insn >> 8) & 0xf); - else - nregs = (insn >> 7) & 0x1f; - switch ((insn >> 5) & 3) { - case 0: - /* lsl */ - offset = offset << nregs; - break; - case 1: - /* lsr */ - offset = offset >> nregs; - break; - default: - break; /* XXX */ - } - } - return (addr + offset); - } - case 0xa: /* b ... */ - case 0xb: /* bl ... 
*/ - addr = ((insn << 2) & 0x03ffffff); - if (addr & 0x02000000) - addr |= 0xfc000000; - return (pc + 8 + addr); - case 0x7: /* ldr pc, [pc, reg, lsl #2] */ - addr = db_fetch_reg(insn & 0xf); - addr = pc + 8 + (addr << 2); - db_read_bytes(addr, 4, (char *)&addr); - return (addr); - case 0x1: /* mov pc, reg */ - addr = db_fetch_reg(insn & 0xf); - return (addr); - case 0x4: - case 0x5: /* ldr pc, [reg] */ - addr = db_fetch_reg((insn >> 16) & 0xf); - /* ldr pc, [reg, #offset] */ - if (insn & (1 << 24)) - offset = insn & 0xfff; - if (insn & 0x00800000) - addr += offset; - else - addr -= offset; - db_read_bytes(addr, 4, (char *)&addr); - return (addr); - case 0x8: /* ldmxx reg, {..., pc} */ - case 0x9: - addr = db_fetch_reg((insn >> 16) & 0xf); - nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); - nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); - nregs = (nregs + (nregs >> 4)) & 0x0f0f; - nregs = (nregs + (nregs >> 8)) & 0x001f; - switch ((insn >> 23) & 0x3) { - case 0x0: /* ldmda */ - addr = addr - 0; - break; - case 0x1: /* ldmia */ - addr = addr + 0 + ((nregs - 1) << 2); - break; - case 0x2: /* ldmdb */ - addr = addr - 4; - break; - case 0x3: /* ldmib */ - addr = addr + 4 + ((nregs - 1) << 2); - break; - } - db_read_bytes(addr, 4, (char *)&addr); - return (addr); - default: - panic("branch_taken: botch"); - } + if (ret != 0) + kdb_reenter(); + + return (new_pc); } Index: head/sys/arm/arm/machdep.c =================================================================== --- head/sys/arm/arm/machdep.c (revision 290272) +++ head/sys/arm/arm/machdep.c (revision 290273) @@ -1,1698 +1,1912 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Machine dependent functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include "opt_timer.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DDB #include #if __ARM_ARCH >= 6 #include DB_SHOW_COMMAND(cp15, db_show_cp15) { u_int reg; reg = cp15_midr_get(); db_printf("Cpu ID: 0x%08x\n", reg); reg = cp15_ctr_get(); db_printf("Current Cache Lvl ID: 0x%08x\n",reg); reg = cp15_sctlr_get(); db_printf("Ctrl: 0x%08x\n",reg); reg = cp15_actlr_get(); db_printf("Aux Ctrl: 0x%08x\n",reg); reg = cp15_id_pfr0_get(); db_printf("Processor Feat 0: 0x%08x\n", reg); reg = cp15_id_pfr1_get(); db_printf("Processor Feat 1: 0x%08x\n", reg); reg = cp15_id_dfr0_get(); db_printf("Debug Feat 0: 0x%08x\n", reg); reg = cp15_id_afr0_get(); db_printf("Auxiliary Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr0_get(); db_printf("Memory Model Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr1_get(); db_printf("Memory Model Feat 1: 0x%08x\n", reg); reg = cp15_id_mmfr2_get(); db_printf("Memory Model Feat 2: 0x%08x\n", reg); reg = cp15_id_mmfr3_get(); db_printf("Memory Model Feat 3: 0x%08x\n", reg); reg = cp15_ttbr_get(); db_printf("TTB0: 0x%08x\n", reg); } DB_SHOW_COMMAND(vtop, db_show_vtop) { u_int reg; if (have_addr) { cp15_ats1cpr_set(addr); reg = cp15_par_get(); db_printf("Physical address reg: 0x%08x\n",reg); } else db_printf("show vtop <virt_addr>\n"); } #endif /* __ARM_ARCH >= 6 */ #endif /* DDB */ #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT vm_paddr_t pmap_pa; #ifdef ARM_NEW_PMAP vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; #else /* * This is the number of L2 page tables required for covering max * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, * stacks etc.), rounded up to be divisible by 4.
*/ #define KERNEL_PT_MAX 78 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; struct pv_addr systempage; static struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; static struct pv_addr kernelstack; #endif #endif #if defined(LINUX_BOOT_ABI) #define LBABI_MAX_BANKS 10 uint32_t board_id; struct arm_lbabi_tag *atag_list; char linux_command_line[LBABI_MAX_COMMAND_LINE + 1]; char atags[LBABI_MAX_COMMAND_LINE * 2]; uint32_t memstart[LBABI_MAX_BANKS]; uint32_t memsize[LBABI_MAX_BANKS]; uint32_t membanks; #endif static uint32_t board_revision; /* hex representation of uint64_t */ static char board_serial[32]; SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes"); SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD, &board_revision, 0, "Board revision"); SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD, board_serial, 0, "Board serial"); int vfp_exists; SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, &vfp_exists, 0, "Floating point support enabled"); void board_set_serial(uint64_t serial) { snprintf(board_serial, sizeof(board_serial)-1, "%016jx", serial); } void board_set_revision(uint32_t revision) { board_revision = revision; } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. 
*/ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. */ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; if (va == ARM_VECTORS_HIGH) { /* * Assume the MD caller knows what it's doing here, and * really does want the vector page relocated. * * Note: This has to be done here (and not just in * cpu_setup()) because the vector page needs to be * accessible *before* cpu_startup() is called. * Think ddb(9) ... * * NOTE: If the CPU control register is not readable, * this will totally fail! We'll just assume that * any system that has high vector support has a * readable CPU control register, for now. If we * ever encounter one that does not, we'll have to * rethink this. */ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); } } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; #ifdef ARM_TP_ADDRESS #ifndef ARM_CACHE_LOCK_ENABLE vm_page_t m; #endif #endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. */ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_cnt.v_free_count), (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); if (bootverbose) { arm_physmem_print_tables(); arm_devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(pmap_kernel(), pcb); #ifndef ARM_NEW_PMAP vector_page_setprot(VM_PROT_READ); pmap_postinit(); #endif #ifdef ARM_TP_ADDRESS #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); #else m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); #endif *(uint32_t *)ARM_RAS_START = 0; *(uint32_t *)ARM_RAS_END = 0xffffffff; #endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. 
void cpu_flush_dcache(void *ptr, size_t len) { cpu_dcache_wb_range((uintptr_t)ptr, len); #ifdef ARM_L2_PIPT cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len); #else cpu_l2cache_wb_range((uintptr_t)ptr, len); #endif } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { return (ENXIO); } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); #ifndef NO_EVENTTIMERS if (!busy) cpu_idleclock(); #endif if (!sched_runnable()) cpu_sleep(0); #ifndef NO_EVENTTIMERS if (!busy) cpu_activeclock(); #endif spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } /* * Most ARM platforms don't need to do anything special to init their clocks * (they get initialized during normal device attachment), and by not defining a * cpu_initclocks() function they get this generic one. Any platform that needs * to do something special can just provide their own implementation, which will * override this one due to the weak linkage. */ void arm_generic_initclocks(void) { #ifndef NO_EVENTTIMERS #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif #endif } __weak_reference(arm_generic_initclocks, cpu_initclocks); int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(&tf->tf_r0, regs->r, sizeof(regs->r)); regs->r_sp = tf->tf_usr_sp; regs->r_lr = tf->tf_usr_lr; regs->r_pc = tf->tf_pc; regs->r_cpsr = tf->tf_spsr; return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { bzero(regs, sizeof(*regs)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(regs->r, &tf->tf_r0, sizeof(regs->r)); tf->tf_usr_sp = regs->r_sp; tf->tf_usr_lr = regs->r_lr; tf->tf_pc = regs->r_pc; tf->tf_spsr &= ~PSR_FLAGS; tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS; return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { return (0); } int set_dbregs(struct thread *td, struct dbreg *regs) { return (0); } static int ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v) { struct iovec iov; struct uio uio; PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED); iov.iov_base = (caddr_t) v; iov.iov_len = sizeof(u_int32_t); uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = (off_t)addr; uio.uio_resid = sizeof(u_int32_t); uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_READ; uio.uio_td = td; return proc_rwmem(td->td_proc, &uio); } static int ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v) { struct iovec iov; struct uio uio; PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED); iov.iov_base = (caddr_t) &v; iov.iov_len = sizeof(u_int32_t); uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = (off_t)addr; uio.uio_resid = sizeof(u_int32_t); uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_WRITE; uio.uio_td = td; return proc_rwmem(td->td_proc, &uio); } +static u_int +ptrace_get_usr_reg(void *cookie, int reg) +{ + int ret; + struct thread *td = cookie; + + KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)), + ("reg is outside range")); + + switch(reg) { + case ARM_REG_NUM_PC: + ret = td->td_frame->tf_pc; + break; + case ARM_REG_NUM_LR: + ret = td->td_frame->tf_usr_lr; + break; + case ARM_REG_NUM_SP: + ret = td->td_frame->tf_usr_sp; + break; + default: + ret = *((register_t*)&td->td_frame->tf_r0 + reg); + break; + } + + return (ret);
+} + +static u_int +ptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val) +{ + struct thread *td = cookie; + u_int error; + + error = ptrace_read_int(td, offset, val); + + return (error); +} + +/** + * This function parses the current instruction opcode and decodes + * any possible jump (change in PC) which might occur after + * the instruction is executed. + * + * @param td Thread structure of the analysed task + * @param cur_instr Currently executed instruction + * @param alt_next_address Pointer to the variable where + * the destination address of the + * jump instruction shall be stored. + * + * @return 0 when a jump is possible, + * EINVAL otherwise + */ +static int +ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr, + uint32_t *alt_next_address) +{ + int error; + + if (inst_branch(cur_instr) || inst_call(cur_instr) || + inst_return(cur_instr)) { + error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc, + alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int); + + return (error); + } + + return (EINVAL); +} + int ptrace_single_step(struct thread *td) { struct proc *p; - int error; + int error, error_alt; + uint32_t cur_instr, alt_next = 0; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); KASSERT(td->td_md.md_ptrace_instr == 0, ("Didn't clear single step")); + KASSERT(td->td_md.md_ptrace_instr_alt == 0, + ("Didn't clear alternative single step")); p = td->td_proc; PROC_UNLOCK(p); - error = ptrace_read_int(td, td->td_frame->tf_pc + 4, - &td->td_md.md_ptrace_instr); + + error = ptrace_read_int(td, td->td_frame->tf_pc, + &cur_instr); if (error) goto out; - error = ptrace_write_int(td, td->td_frame->tf_pc + 4, - PTRACE_BREAKPOINT); - if (error) - td->td_md.md_ptrace_instr = 0; - td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4; + + error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE, + &td->td_md.md_ptrace_instr); + if (error == 0) { + error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE, + PTRACE_BREAKPOINT); + if (error) { + td->td_md.md_ptrace_instr = 0; + } else { + td->td_md.md_ptrace_addr = td->td_frame->tf_pc + + INSN_SIZE; + } + } + + error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next); + if (error_alt == 0) { + error_alt = ptrace_read_int(td, alt_next, + &td->td_md.md_ptrace_instr_alt); + if (error_alt) { + td->td_md.md_ptrace_instr_alt = 0; + } else { + error_alt = ptrace_write_int(td, alt_next, + PTRACE_BREAKPOINT); + if (error_alt) + td->td_md.md_ptrace_instr_alt = 0; + else + td->td_md.md_ptrace_addr_alt = alt_next; + } + } + out: PROC_LOCK(p); - return (error); + return ((error != 0) && (error_alt != 0)); } int ptrace_clear_single_step(struct thread *td) { struct proc *p; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); - if (td->td_md.md_ptrace_instr) { + if (td->td_md.md_ptrace_instr != 0) { p = td->td_proc; PROC_UNLOCK(p); ptrace_write_int(td, td->td_md.md_ptrace_addr, td->td_md.md_ptrace_instr); PROC_LOCK(p); td->td_md.md_ptrace_instr = 0; } + + if (td->td_md.md_ptrace_instr_alt != 0) { + p = td->td_proc; + PROC_UNLOCK(p); + ptrace_write_int(td, td->td_md.md_ptrace_addr_alt, + td->td_md.md_ptrace_instr_alt); + PROC_LOCK(p); + td->td_md.md_ptrace_instr_alt = 0; + } + return (0); } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_pc = addr; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t
cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; critical_exit(); cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) restore_interrupts(cspr); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } /* * Get machine context. */ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; return (0); } /* * MPSAFE */ int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int spsr; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = uc.uc_mcontext.__gregs[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); /* Restore register context. */ set_mcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } /* * Fake up a boot descriptor table */ vm_offset_t fake_preload_metadata(struct arm_boot_params *abp __unused) { #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i = 0; static uint32_t fake_preload[35]; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf kernel") + 1; strcpy((char*)&fake_preload[i++], "elf kernel"); i += 2; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = KERNVIRTADDR; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(uint32_t); fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR; #ifdef DDB if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; return (lastaddr); } void pcpu0_init(void) { #if __ARM_ARCH >= 6 set_curthread(&thread0); #endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } #if defined(LINUX_BOOT_ABI) vm_offset_t linux_parse_boot_param(struct arm_boot_params *abp) { struct arm_lbabi_tag *walker; uint32_t revision; uint64_t serial; /* * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2 * is atags or dtb pointer. If all of these aren't satisfied, * then punt. */ if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0)) return 0; board_id = abp->abp_r1; walker = (struct arm_lbabi_tag *) (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr); /* xxx - Need to also look for binary device tree */ if (ATAG_TAG(walker) != ATAG_CORE) return 0; atag_list = walker; while (ATAG_TAG(walker) != ATAG_NONE) { switch (ATAG_TAG(walker)) { case ATAG_CORE: break; case ATAG_MEM: arm_physmem_hardware_region(walker->u.tag_mem.start, walker->u.tag_mem.size); break; case ATAG_INITRD2: break; case ATAG_SERIAL: serial = walker->u.tag_sn.low | ((uint64_t)walker->u.tag_sn.high << 32); board_set_serial(serial); break; case ATAG_REVISION: revision = walker->u.tag_rev.rev; board_set_revision(revision); break; case ATAG_CMDLINE: /* XXX open question: Parse this for boothowto? 
*/ bcopy(walker->u.tag_cmd.command, linux_command_line, ATAG_SIZE(walker)); break; default: break; } walker = ATAG_NEXT(walker); } /* Save a copy for later */ bcopy(atag_list, atags, (char *)walker - (char *)atag_list + ATAG_SIZE(walker)); return fake_preload_metadata(abp); } #endif #if defined(FREEBSD_BOOT_LOADER) vm_offset_t freebsd_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr = 0; void *mdp; void *kmdp; #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif /* * Mask metadata pointer: it is supposed to be on a page boundary. If * the first argument (mdp) doesn't point to a valid address the * bootloader must have passed us something other than the metadata * ptr, so we give up. Also give up if we cannot find the metadata * section the loader creates, which we get all this data out of. */ if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL) return 0; preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) return 0; boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif return lastaddr; } #endif vm_offset_t default_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr; #if defined(LINUX_BOOT_ABI) if ((lastaddr = linux_parse_boot_param(abp)) != 0) return lastaddr; #endif #if defined(FREEBSD_BOOT_LOADER) if ((lastaddr = freebsd_parse_boot_param(abp)) != 0) return lastaddr; #endif /* Fall back to hardcoded metadata. */ lastaddr = fake_preload_metadata(abp); return lastaddr; } /* * Stub version of the boot parameter parsing routine. We are * called early in initarm, before even VM has been initialized. * This routine needs to preserve any data that the boot loader * has passed in before the kernel starts to grow past the end * of the BSS, traditionally the place boot-loaders put this data. * * Since this is called so early, before things that depend on the vm * system being set up (including access to some SoC's serial ports) are * usable, about all that can be done in this routine is to copy the * arguments. * * This is the default boot parameter parsing routine. Individual * kernels/boards can override this weak function with one of their * own. We just fake metadata... */
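default_parse_boot_param() above falls through a chain: Linux ATAGs first, then FreeBSD loader metadata, then faked metadata, and the __weak_reference() on the next line is what lets a board kernel replace the whole routine at link time. A rough standalone illustration of that override pattern, using the GCC/Clang weak-alias attribute directly rather than FreeBSD's __weak_reference() macro; every name in it is invented for the sketch:

#include <stdio.h>

/* Default parser, used only when no board file provides a strong parse(). */
int
default_parse(void)
{
	printf("generic boot-param parser\n");
	return (0);
}

/* Weak alias: a non-weak parse() elsewhere in the program wins at link time. */
int parse(void) __attribute__((weak, alias("default_parse")));

int
main(void)
{
	return (parse());
}

Linking in another object file that defines its own strong int parse(void) silently displaces the default, which is how a board's parse_boot_param overrides default_parse_boot_param here.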
*/ __weak_reference(default_parse_boot_param, parse_boot_param); /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; +} + +int +arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc, + u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*)) +{ + u_int addr, nregs, offset = 0; + int error = 0; + + switch ((insn >> 24) & 0xf) { + case 0x2: /* add pc, reg1, #value */ + case 0x0: /* add pc, reg1, reg2, lsl #offset */ + addr = fetch_reg(cookie, (insn >> 16) & 0xf); + if (((insn >> 16) & 0xf) == 15) + addr += 8; + if (insn & 0x0200000) { + offset = (insn >> 7) & 0x1e; + offset = (insn & 0xff) << (32 - offset) | + (insn & 0xff) >> offset; + } else { + + offset = fetch_reg(cookie, insn & 0x0f); + if ((insn & 0x0000ff0) != 0x00000000) { + if (insn & 0x10) + nregs = fetch_reg(cookie, + (insn >> 8) & 0xf); + else + nregs = (insn >> 7) & 0x1f; + switch ((insn >> 5) & 3) { + case 0: + /* lsl */ + offset = offset << nregs; + break; + case 1: + /* lsr */ + offset = offset >> nregs; + break; + default: + break; /* XXX */ + } + + } + *new_pc = addr + offset; + return (0); + + } + + case 0xa: /* b ... */ + case 0xb: /* bl ... */ + addr = ((insn << 2) & 0x03ffffff); + if (addr & 0x02000000) + addr |= 0xfc000000; + *new_pc = (pc + 8 + addr); + return (0); + case 0x7: /* ldr pc, [pc, reg, lsl #2] */ + addr = fetch_reg(cookie, insn & 0xf); + addr = pc + 8 + (addr << 2); + error = read_int(cookie, addr, &addr); + *new_pc = addr; + return (error); + case 0x1: /* mov pc, reg */ + *new_pc = fetch_reg(cookie, insn & 0xf); + return (0); + case 0x4: + case 0x5: /* ldr pc, [reg] */ + addr = fetch_reg(cookie, (insn >> 16) & 0xf); + /* ldr pc, [reg, #offset] */ + if (insn & (1 << 24)) + offset = insn & 0xfff; + if (insn & 0x00800000) + addr += offset; + else + addr -= offset; + error = read_int(cookie, addr, &addr); + *new_pc = addr; + + return (error); + case 0x8: /* ldmxx reg, {..., pc} */ + case 0x9: + addr = fetch_reg(cookie, (insn >> 16) & 0xf); + nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); + nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); + nregs = (nregs + (nregs >> 4)) & 0x0f0f; + nregs = (nregs + (nregs >> 8)) & 0x001f; + switch ((insn >> 23) & 0x3) { + case 0x0: /* ldmda */ + addr = addr - 0; + break; + case 0x1: /* ldmia */ + addr = addr + 0 + ((nregs - 1) << 2); + break; + case 0x2: /* ldmdb */ + addr = addr - 4; + break; + case 0x3: /* ldmib */ + addr = addr + 4 + ((nregs - 1) << 2); + break; + } + error = read_int(cookie, addr, &addr); + *new_pc = addr; + + return (error); + default: + return (EINVAL); + } } #ifdef ARM_NEW_PMAP void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #else void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #endif 
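arm_predict_branch() above is the old DDB-only branch_taken() decoder made reusable: the cookie plus the fetch_reg/read_int callbacks let DDB read registers out of kdb_frame while ptrace reads them from a stopped thread, which is what allows ptrace_single_step() to plant a second breakpoint at the predicted branch target. Two of its decoding details are easy to sanity-check in isolation: the sign extension of the 24-bit B/BL word offset and the parallel (SWAR) population count of the ldm register mask. A minimal sketch, assuming nothing beyond standard C on a 32-bit-unsigned platform; the helper names are mine, not the kernel's:

#include <stdio.h>

/* B/BL: imm24 is in words; shift to bytes, sign-extend from bit 25. */
static unsigned
bl_target(unsigned insn, unsigned pc)
{
	unsigned addr;

	addr = (insn << 2) & 0x03ffffff;
	if (addr & 0x02000000)
		addr |= 0xfc000000;
	return (pc + 8 + addr);	/* +8: the ARM pc reads two insns ahead */
}

/* The parallel bit count applied to the ldm register mask. */
static unsigned
ldm_nregs(unsigned insn)
{
	unsigned nregs;

	nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
	nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
	nregs = (nregs + (nregs >> 4)) & 0x0f0f;
	return ((nregs + (nregs >> 8)) & 0x001f);
}

int
main(void)
{
	/* 0xeafffffe encodes "b ." (imm24 = -2 words), so target == pc. */
	printf("b .    -> 0x%x\n", bl_target(0xeafffffe, 0x8000));
	/* 0xeb000001 encodes "bl pc+12" (8 for the pipeline, 4 for imm24=1). */
	printf("bl +12 -> 0x%x\n", bl_target(0xeb000001, 0x8000));
	/* ldmia sp!, {r4-r7, pc} has register mask 0x80f0: five registers. */
	printf("nregs  -> %u\n", ldm_nregs(0x80f0));
	return (0);
}

Both expressions come verbatim from the function above; the sketch prints 0x8000, 0x800c and 5.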
#ifdef EFI #define efi_next_descriptor(ptr, size) \ ((struct efi_md *)(((uint8_t *) ptr) + size)) static void add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr, int *mrcnt, uint32_t *memsize) { struct efi_md *map, *p; const char *type; size_t efisz, memory_size; int ndesc, i, j; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode" }; *mrcnt = 0; *memsize = 0; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = roundup2(sizeof(struct efi_map_header), 0x10); map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; if (boothowto & RB_VERBOSE) printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); memory_size = 0; for (i = 0, j = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { if (boothowto & RB_VERBOSE) { if (p->md_type <= EFI_MD_TYPE_PALCODE) type = types[p->md_type]; else type = ""; printf("%23s %012llx %12p %08llx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: continue; } j++; if (j >= FDT_MEM_REGIONS) break; mr[j].mr_start = p->md_phys; mr[j].mr_size = p->md_pages * PAGE_SIZE; memory_size += mr[j].mr_size; } *mrcnt = j; *memsize = memory_size; } #endif /* EFI */ #ifdef FDT static char * kenv_next(char *cp) { if (cp != NULL) { while (*cp != 0) cp++; cp++; if (*cp == 0) cp = NULL; } return (cp); } static void print_kenv(void) { char *cp; debugf("loader passed (static) kenv:\n"); if (kern_envp == NULL) { debugf(" no env, null ptr\n"); return; } debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp); for (cp = kern_envp; cp != NULL; cp = kenv_next(cp)) debugf(" %x %s\n", (uint32_t)cp, cp); } #ifndef ARM_NEW_PMAP void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pv_addr kernel_l1pt; struct pv_addr dpcpu; vm_offset_t dtbp, freemempos, l2_start, lastaddr; uint32_t memsize, l2size; char *env; void *kmdp; u_int l1pagetable; int i, j, err_devmap, mem_regions_sz; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; memsize = 0; cpuinfo_init(); set_cpufuncs(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); else dtbp = (vm_offset_t)NULL; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. 
*/ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* Calculate number of L2 tables needed for mapping vm_page_array */ l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); l2size = (l2size >> L1_S_SHIFT) + 1; /* * Add one table for end of kernel map, one for stacks, msgbuf and * L1 and L2 tables map and one for vectors map. */ l2size += 3; /* Make it divisible by 4 */ l2size = (l2size + 3) & ~3; freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0, j = 0; i < l2size; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); j = i; } else { kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + L2_TABLE_SIZE_REAL * (i - j); kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* * Try to map as much as possible of kernel text and data using * 1MB section mapping and for the rest of initial kernel address * space use L2 coarse tables. 
* * Link L2 tables for mapping remainder of kernel (modulo 1MB) * and kernel structures */ l2_start = lastaddr & ~(L1_S_OFFSET); for (i = 0 ; i < l2size - 1; i++) pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, &kernel_pt_table[i]); pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; /* Map kernel code and data */ pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map L1 directory and allocated L2 page tables */ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, kernel_pt_table[0].pv_pa, L2_TABLE_SIZE_REAL * l2size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map allocated DPCPU, stacks and msgbuf */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, freemempos - dpcpu.pv_va, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Link and map the vector page */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[l2size - 1]); pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(l1pagetable, NULL); vm_max_kernel_address = platform_lastaddr(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); pmap_pa = kernel_l1pt.pv_pa; setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). 
*/ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void *)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #else /* !ARM_NEW_PMAP */ void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; uint32_t memsize; char *env; void *kmdp; int err_devmap, mem_regions_sz; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; memsize = 0; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz, &memsize); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); } arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. 
*/ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU); /* Allocate message buffer. */ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); kdb_init(); return ((void *)STACKALIGN(thread0.td_pcb)); } #endif /* !ARM_NEW_PMAP */ #endif /* FDT */ Index: head/sys/arm/include/armreg.h =================================================================== --- head/sys/arm/include/armreg.h (revision 290272) +++ head/sys/arm/include/armreg.h (revision 290273) @@ -1,449 +1,455 @@ /* $NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $ */ /*- * Copyright (c) 1998, 2001 Ben Harris * Copyright (c) 1994-1996 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. 
* * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_ARMREG_H #define MACHINE_ARMREG_H #include #define INSN_SIZE 4 #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define PSR_MODE 0x0000001f /* mode mask */ #define PSR_USR32_MODE 0x00000010 #define PSR_FIQ32_MODE 0x00000011 #define PSR_IRQ32_MODE 0x00000012 #define PSR_SVC32_MODE 0x00000013 #define PSR_MON32_MODE 0x00000016 #define PSR_ABT32_MODE 0x00000017 #define PSR_HYP32_MODE 0x0000001a #define PSR_UND32_MODE 0x0000001b #define PSR_SYS32_MODE 0x0000001f #define PSR_32_MODE 0x00000010 #define PSR_T 0x00000020 /* Instruction set bit */ #define PSR_F 0x00000040 /* FIQ disable bit */ #define PSR_I 0x00000080 /* IRQ disable bit */ #define PSR_A 0x00000100 /* Imprecise abort bit */ #define PSR_E 0x00000200 /* Data endianess bit */ #define PSR_GE 0x000f0000 /* Greater than or equal to bits */ #define PSR_J 0x01000000 /* Java bit */ #define PSR_Q 0x08000000 /* Sticky overflow bit */ #define PSR_V 0x10000000 /* Overflow bit */ #define PSR_C 0x20000000 /* Carry bit */ #define PSR_Z 0x40000000 /* Zero bit */ #define PSR_N 0x80000000 /* Negative bit */ #define PSR_FLAGS 0xf0000000 /* Flags mask. */ /* The high-order byte is always the implementor */ #define CPU_ID_IMPLEMENTOR_MASK 0xff000000 #define CPU_ID_ARM_LTD 0x41000000 /* 'A' */ #define CPU_ID_DEC 0x44000000 /* 'D' */ #define CPU_ID_INTEL 0x69000000 /* 'i' */ #define CPU_ID_TI 0x54000000 /* 'T' */ #define CPU_ID_FARADAY 0x66000000 /* 'f' */ /* How to decide what format the CPUID is in. 
/* The high-order byte is always the implementor */
#define CPU_ID_IMPLEMENTOR_MASK	0xff000000
#define CPU_ID_ARM_LTD		0x41000000 /* 'A' */
#define CPU_ID_DEC		0x44000000 /* 'D' */
#define CPU_ID_INTEL		0x69000000 /* 'i' */
#define CPU_ID_TI		0x54000000 /* 'T' */
#define CPU_ID_FARADAY		0x66000000 /* 'f' */

/* How to decide what format the CPUID is in. */
#define CPU_ID_ISOLD(x)		(((x) & 0x0000f000) == 0x00000000)
#define CPU_ID_IS7(x)		(((x) & 0x0000f000) == 0x00007000)
#define CPU_ID_ISNEW(x)		(!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x))

/* On recent ARMs this byte holds the architecture and variant (sub-model) */
#define CPU_ID_ARCH_MASK	0x000f0000
#define CPU_ID_ARCH_V3		0x00000000
#define CPU_ID_ARCH_V4		0x00010000
#define CPU_ID_ARCH_V4T		0x00020000
#define CPU_ID_ARCH_V5		0x00030000
#define CPU_ID_ARCH_V5T		0x00040000
#define CPU_ID_ARCH_V5TE	0x00050000
#define CPU_ID_ARCH_V5TEJ	0x00060000
#define CPU_ID_ARCH_V6		0x00070000
#define CPU_ID_CPUID_SCHEME	0x000f0000
#define CPU_ID_VARIANT_MASK	0x00f00000

/* Next three nybbles are part number */
#define CPU_ID_PARTNO_MASK	0x0000fff0

/* Intel XScale has sub fields in part number */
#define CPU_ID_XSCALE_COREGEN_MASK	0x0000e000 /* core generation */
#define CPU_ID_XSCALE_COREREV_MASK	0x00001c00 /* core revision */
#define CPU_ID_XSCALE_PRODUCT_MASK	0x000003f0 /* product number */

/* And finally, the revision number. */
#define CPU_ID_REVISION_MASK	0x0000000f

/* Individual CPUs are probably best IDed by everything but the revision. */
#define CPU_ID_CPU_MASK		0xfffffff0

/* ARM9 and later CPUs */
#define CPU_ID_ARM920T		0x41129200
#define CPU_ID_ARM920T_ALT	0x41009200
#define CPU_ID_ARM922T		0x41029220
#define CPU_ID_ARM926EJS	0x41069260
#define CPU_ID_ARM940T		0x41029400 /* XXX no MMU */
#define CPU_ID_ARM946ES		0x41049460 /* XXX no MMU */
#define CPU_ID_ARM966ES		0x41049660 /* XXX no MMU */
#define CPU_ID_ARM966ESR1	0x41059660 /* XXX no MMU */
#define CPU_ID_ARM1020E		0x4115a200 /* (AKA arm10 rev 1) */
#define CPU_ID_ARM1022ES	0x4105a220
#define CPU_ID_ARM1026EJS	0x4106a260
#define CPU_ID_ARM1136JS	0x4107b360
#define CPU_ID_ARM1136JSR1	0x4117b360
#define CPU_ID_ARM1176JZS	0x410fb760
#define CPU_ID_CORTEXA5		0x410fc050
#define CPU_ID_CORTEXA7		0x410fc070
#define CPU_ID_CORTEXA8R1	0x411fc080
#define CPU_ID_CORTEXA8R2	0x412fc080
#define CPU_ID_CORTEXA8R3	0x413fc080
#define CPU_ID_CORTEXA9R1	0x411fc090
#define CPU_ID_CORTEXA9R2	0x412fc090
#define CPU_ID_CORTEXA9R3	0x413fc090
#define CPU_ID_CORTEXA9R4	0x414fc090
#define CPU_ID_CORTEXA12R0	0x410fc0d0
#define CPU_ID_CORTEXA15R0	0x410fc0f0
#define CPU_ID_CORTEXA15R1	0x411fc0f0
#define CPU_ID_CORTEXA15R2	0x412fc0f0
#define CPU_ID_CORTEXA15R3	0x413fc0f0
#define CPU_ID_KRAIT		0x510f06f0 /* Snapdragon S4 Pro/APQ8064 */
#define CPU_ID_TI925T		0x54029250
#define CPU_ID_MV88FR131	0x56251310 /* Marvell Feroceon 88FR131 Core */
#define CPU_ID_MV88FR331	0x56153310 /* Marvell Feroceon 88FR331 Core */
#define CPU_ID_MV88FR571_VD	0x56155710 /* Marvell Feroceon 88FR571-VD Core
					      (ID from datasheet) */
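The masks above slice a "new"-format CPU ID into implementor/variant/part/revision fields; a minimal decoder, purely illustrative and not part of this change:

/* Break a "new"-format CPU ID into its fields using the masks above. */
static void
cpu_id_decode(uint32_t cpuid)
{

	printf("implementor '%c', variant 0x%x, part 0x%x, rev %u\n",
	    (char)((cpuid & CPU_ID_IMPLEMENTOR_MASK) >> 24),
	    (cpuid & CPU_ID_VARIANT_MASK) >> 20,
	    (cpuid & CPU_ID_PARTNO_MASK) >> 4,
	    cpuid & CPU_ID_REVISION_MASK);
}

/* cpu_id_decode(CPU_ID_CORTEXA9R2 | 1) would report 'A', 0x2, 0xc09, 1. */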
/*
 * The LokiPlus core also has its ID set to 0x41159260, and this define
 * causes execution of unsupported L2-cache instructions, so it needs to
 * be disabled.  0x41159260 is a generic ARM926E-S ID.
 */
#ifdef SOC_MV_LOKIPLUS
#define CPU_ID_MV88FR571_41	0x00000000
#else
#define CPU_ID_MV88FR571_41	0x41159260 /* Marvell Feroceon 88FR571-VD Core
					      (actual ID from CPU reg) */
#endif

#define CPU_ID_MV88SV581X_V7	0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */
#define CPU_ID_MV88SV584X_V7	0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */
/* Marvell's CPUIDs with ARM ID in implementor field */
#define CPU_ID_ARM_88SV581X_V7	0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */

#define CPU_ID_FA526		0x66015260
#define CPU_ID_FA626TE		0x66056260
#define CPU_ID_80200		0x69052000
#define CPU_ID_PXA250		0x69052100 /* sans core revision */
#define CPU_ID_PXA210		0x69052120
#define CPU_ID_PXA250A		0x69052100 /* 1st version Core */
#define CPU_ID_PXA210A		0x69052120 /* 1st version Core */
#define CPU_ID_PXA250B		0x69052900 /* 3rd version Core */
#define CPU_ID_PXA210B		0x69052920 /* 3rd version Core */
#define CPU_ID_PXA250C		0x69052d00 /* 4th version Core */
#define CPU_ID_PXA210C		0x69052d20 /* 4th version Core */
#define CPU_ID_PXA27X		0x69054110
#define CPU_ID_80321_400	0x69052420
#define CPU_ID_80321_600	0x69052430
#define CPU_ID_80321_400_B0	0x69052c20
#define CPU_ID_80321_600_B0	0x69052c30
#define CPU_ID_80219_400	0x69052e20 /* A0 stepping/revision. */
#define CPU_ID_80219_600	0x69052e30 /* A0 stepping/revision. */
#define CPU_ID_81342		0x69056810
#define CPU_ID_IXP425		0x690541c0
#define CPU_ID_IXP425_533	0x690541c0
#define CPU_ID_IXP425_400	0x690541d0
#define CPU_ID_IXP425_266	0x690541f0
#define CPU_ID_IXP435		0x69054040
#define CPU_ID_IXP465		0x69054200

/* CPUID registers */
#define ARM_PFR0_ARM_ISA_MASK	0x0000000f
#define ARM_PFR0_THUMB_MASK	0x000000f0
#define ARM_PFR0_THUMB		0x10
#define ARM_PFR0_THUMB2		0x30
#define ARM_PFR0_JAZELLE_MASK	0x00000f00
#define ARM_PFR0_THUMBEE_MASK	0x0000f000

#define ARM_PFR1_ARMV4_MASK	0x0000000f
#define ARM_PFR1_SEC_EXT_MASK	0x000000f0
#define ARM_PFR1_MICROCTRL_MASK	0x00000f00

/*
 * Post-ARM3 CP15 registers:
 *
 *	1	Control register
 *	2	Translation Table Base
 *	3	Domain Access Control
 *	4	Reserved
 *	5	Fault Status
 *	6	Fault Address
 *	7	Cache/write-buffer Control
 *	8	TLB Control
 *	9	Cache Lockdown
 *	10	TLB Lockdown
 *	11	Reserved
 *	12	Reserved
 *	13	Process ID (for FCSE)
 *	14	Reserved
 *	15	Implementation Dependent
 */

/* Some of the definitions below need cleaning up for V3/V4 architectures */

/* CPU control register (CP15 register 1) */
#define CPU_CONTROL_MMU_ENABLE	0x00000001 /* M: MMU/Protection unit enable */
#define CPU_CONTROL_AFLT_ENABLE	0x00000002 /* A: Alignment fault enable */
#define CPU_CONTROL_DC_ENABLE	0x00000004 /* C: IDC/DC enable */
#define CPU_CONTROL_WBUF_ENABLE	0x00000008 /* W: Write buffer enable */
#define CPU_CONTROL_32BP_ENABLE	0x00000010 /* P: 32-bit exception handlers */
#define CPU_CONTROL_32BD_ENABLE	0x00000020 /* D: 32-bit addressing */
#define CPU_CONTROL_LABT_ENABLE	0x00000040 /* L: Late abort enable */
#define CPU_CONTROL_BEND_ENABLE	0x00000080 /* B: Big-endian mode */
#define CPU_CONTROL_SYST_ENABLE	0x00000100 /* S: System protection bit */
#define CPU_CONTROL_ROM_ENABLE	0x00000200 /* R: ROM protection bit */
#define CPU_CONTROL_CPCLK	0x00000400 /* F: Implementation defined */
#define CPU_CONTROL_SW_ENABLE	0x00000400 /* SW: SWP instruction enable */
#define CPU_CONTROL_BPRD_ENABLE	0x00000800 /* Z: Branch prediction enable */
#define CPU_CONTROL_IC_ENABLE	0x00001000 /* I: IC enable */
#define CPU_CONTROL_VECRELOC	0x00002000 /* V: Vector relocation */
#define CPU_CONTROL_ROUNDROBIN	0x00004000 /* RR: Predictable replacement */
#define CPU_CONTROL_V4COMPAT	0x00008000 /* L4: ARMv4 compat LDR R15 etc */
#define CPU_CONTROL_HAF_ENABLE	0x00020000 /* HA: Hardware Access Flag Enable */
#define CPU_CONTROL_FI_ENABLE	0x00200000 /* FI: Low interrupt latency */
#define CPU_CONTROL_UNAL_ENABLE	0x00400000 /* U: unaligned data access */
#define CPU_CONTROL_V6_EXTPAGE	0x00800000 /* XP: ARMv6 extended page tables */
#define CPU_CONTROL_V_ENABLE	0x01000000 /* VE: Interrupt vectors enable */
#define CPU_CONTROL_EX_BEND	0x02000000 /* EE: exception endianness */
#define CPU_CONTROL_L2_ENABLE	0x04000000 /* L2 Cache enabled */
#define CPU_CONTROL_NMFI	0x08000000 /* NMFI: Non-maskable FIQ */
#define CPU_CONTROL_TR_ENABLE	0x10000000 /* TRE: TEX Remap */
#define CPU_CONTROL_AF_ENABLE	0x20000000 /* AFE: Access Flag enable */
#define CPU_CONTROL_TE_ENABLE	0x40000000 /* TE: Thumb Exception enable */

#define CPU_CONTROL_IDC_ENABLE	CPU_CONTROL_DC_ENABLE

/* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define ARM11X6_AUXCTL_RS	0x00000001 /* return stack */
#define ARM11X6_AUXCTL_DB	0x00000002 /* dynamic branch prediction */
#define ARM11X6_AUXCTL_SB	0x00000004 /* static branch prediction */
#define ARM11X6_AUXCTL_TR	0x00000008 /* MicroTLB replacement strat. */
#define ARM11X6_AUXCTL_EX	0x00000010 /* exclusive L1/L2 cache */
#define ARM11X6_AUXCTL_RA	0x00000020 /* clean entire cache disable */
#define ARM11X6_AUXCTL_RV	0x00000040 /* block transfer cache disable */
#define ARM11X6_AUXCTL_CZ	0x00000080 /* restrict cache size */

/* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define ARM1136_AUXCTL_PFI	0x80000000 /* PFI: partial FI mode.
					    * This is an undocumented flag
					    * used to work around a cache bug
					    * in r0 steppings. See errata
					    * 364296.
					    */

/* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define ARM1176_AUXCTL_PHD	0x10000000 /* inst. prefetch halting disable */
#define ARM1176_AUXCTL_BFD	0x20000000 /* branch folding disable */
#define ARM1176_AUXCTL_FSD	0x40000000 /* force speculative ops disable */
#define ARM1176_AUXCTL_FIO	0x80000000 /* low intr latency override */
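These CP15 register-1 bits are consumed with MRC/MCR read-modify-write sequences. A hedged ARMv7 sketch (the function is hypothetical, not part of this change; on pre-v7 cores the barrier would differ):

/* Illustrative ARMv7 sequence: turn on the I-cache and branch predictor. */
static void
sctlr_enable_icache(void)
{
	uint32_t sctlr;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	sctlr |= CPU_CONTROL_IC_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
	__asm __volatile("isb");	/* make the update visible */
}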
/* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define XSCALE_AUXCTL_K		0x00000001 /* dis. write buffer coalescing */
#define XSCALE_AUXCTL_P		0x00000002 /* ECC protect page table access */
/* Note: XScale core 3 uses these for LLR Dcache attributes */
#define XSCALE_AUXCTL_MD_WB_RA	0x00000000 /* mini-D$ wb, read-allocate */
#define XSCALE_AUXCTL_MD_WB_RWA	0x00000010 /* mini-D$ wb, read/write-allocate */
#define XSCALE_AUXCTL_MD_WT	0x00000020 /* mini-D$ wt, read-allocate */
#define XSCALE_AUXCTL_MD_MASK	0x00000030

/* Xscale Core 3 only */
#define XSCALE_AUXCTL_LLR	0x00000400 /* Enable L2 for LLR Cache */

/* Marvell Extra Features Register (CP15 register 1, opcode2 0) */
#define MV_DC_REPLACE_LOCK	0x80000000 /* Replace DCache Lock */
#define MV_DC_STREAM_ENABLE	0x20000000 /* DCache Streaming Switch */
#define MV_WA_ENABLE		0x10000000 /* Enable Write Allocate */
#define MV_L2_PREFETCH_DISABLE	0x01000000 /* L2 Cache Prefetch Disable */
#define MV_L2_INV_EVICT_ERR	0x00800000 /* L2 Invalidates Uncorrectable
					      Error Line Eviction */
#define MV_L2_ENABLE		0x00400000 /* L2 Cache enable */
#define MV_IC_REPLACE_LOCK	0x00080000 /* Replace ICache Lock */
#define MV_BGH_ENABLE		0x00040000 /* Branch Global History
					      Register Enable */
#define MV_BTB_DISABLE		0x00020000 /* Branch Target Buffer Disable */
#define MV_L1_PARERR_ENABLE	0x00010000 /* L1 Parity Error Enable */

/* Cache type register definitions */
#define CPU_CT_ISIZE(x)		((x) & 0xfff)		/* I$ info */
#define CPU_CT_DSIZE(x)		(((x) >> 12) & 0xfff)	/* D$ info */
#define CPU_CT_S		(1U << 24)		/* split cache */
#define CPU_CT_CTYPE(x)		(((x) >> 25) & 0xf)	/* cache type */
#define CPU_CT_FORMAT(x)	((x) >> 29)
/* Cache type register definitions for ARM v7 */
#define CPU_CT_IMINLINE(x)	((x) & 0xf)		/* I$ min line size */
#define CPU_CT_DMINLINE(x)	(((x) >> 16) & 0xf)	/* D$ min line size */

#define CPU_CT_CTYPE_WT		0	/* write-through */
#define CPU_CT_CTYPE_WB1	1	/* write-back, clean w/ read */
#define CPU_CT_CTYPE_WB2	2	/* w/b, clean w/ cp15,7 */
#define CPU_CT_CTYPE_WB6	6	/* w/b, cp15,7, lockdown fmt A */
#define CPU_CT_CTYPE_WB7	7	/* w/b, cp15,7, lockdown fmt B */

#define CPU_CT_xSIZE_LEN(x)	((x) & 0x3)		/* line size */
#define CPU_CT_xSIZE_M		(1U << 2)		/* multiplier */
#define CPU_CT_xSIZE_ASSOC(x)	(((x) >> 3) & 0x7)	/* associativity */
#define CPU_CT_xSIZE_SIZE(x)	(((x) >> 6) & 0x7)	/* size */

#define CPU_CT_ARMV7		0x4
/* ARM v7 Cache type definitions */
#define CPUV7_CT_CTYPE_WT	(1U << 31)
#define CPUV7_CT_CTYPE_WB	(1 << 30)
#define CPUV7_CT_CTYPE_RA	(1 << 29)
#define CPUV7_CT_CTYPE_WA	(1 << 28)

#define CPUV7_CT_xSIZE_LEN(x)	((x) & 0x7)		/* line size */
#define CPUV7_CT_xSIZE_ASSOC(x)	(((x) >> 3) & 0x3ff)	/* associativity */
#define CPUV7_CT_xSIZE_SET(x)	(((x) >> 13) & 0x7fff)	/* num sets */

#define CPUV7_L2CTLR_NPROC_SHIFT	24
#define CPUV7_L2CTLR_NPROC(r)	((((r) >> CPUV7_L2CTLR_NPROC_SHIFT) & 3) + 1)

#define CPU_CLIDR_CTYPE(reg,x)	(((reg) >> ((x) * 3)) & 0x7)
#define CPU_CLIDR_LOUIS(reg)	(((reg) >> 21) & 0x7)
#define CPU_CLIDR_LOC(reg)	(((reg) >> 24) & 0x7)
#define CPU_CLIDR_LOUU(reg)	(((reg) >> 27) & 0x7)

#define CACHE_ICACHE		1
#define CACHE_DCACHE		2
#define CACHE_SEP_CACHE		3
#define CACHE_UNI_CACHE		4
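The CPUV7_CT_xSIZE_* macros decode a CCSIDR-style cache-size value, whose fields are stored as value-minus-one; total bytes fall out as sets * ways * line length. An illustrative helper (assumed, not in the diff):

/* Compute a cache level's size in bytes from a v7 CCSIDR value. */
static uint32_t
ccsidr_cache_size(uint32_t ccsidr)
{
	uint32_t linelen, assoc, sets;

	/* The line-size field encodes log2(words per line) - 2. */
	linelen = 1U << (CPUV7_CT_xSIZE_LEN(ccsidr) + 4);
	assoc = CPUV7_CT_xSIZE_ASSOC(ccsidr) + 1;
	sets = CPUV7_CT_xSIZE_SET(ccsidr) + 1;
	return (sets * assoc * linelen);
}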
/* Fault status register definitions */

#define FAULT_USER	0x10

#if __ARM_ARCH < 6
#define FAULT_TYPE_MASK	0x0f
#define FAULT_WRTBUF_0	0x00 /* Vector Exception */
#define FAULT_WRTBUF_1	0x02 /* Terminal Exception */
#define FAULT_BUSERR_0	0x04 /* External Abort on Linefetch -- Section */
#define FAULT_BUSERR_1	0x06 /* External Abort on Linefetch -- Page */
#define FAULT_BUSERR_2	0x08 /* External Abort on Non-linefetch -- Section */
#define FAULT_BUSERR_3	0x0a /* External Abort on Non-linefetch -- Page */
#define FAULT_BUSTRNL1	0x0c /* External abort on Translation -- Level 1 */
#define FAULT_BUSTRNL2	0x0e /* External abort on Translation -- Level 2 */
#define FAULT_ALIGN_0	0x01 /* Alignment */
#define FAULT_ALIGN_1	0x03 /* Alignment */
#define FAULT_TRANS_S	0x05 /* Translation -- Section */
#define FAULT_TRANS_F	0x06 /* Translation -- Flag */
#define FAULT_TRANS_P	0x07 /* Translation -- Page */
#define FAULT_DOMAIN_S	0x09 /* Domain -- Section */
#define FAULT_DOMAIN_P	0x0b /* Domain -- Page */
#define FAULT_PERM_S	0x0d /* Permission -- Section */
#define FAULT_PERM_P	0x0f /* Permission -- Page */

#define FAULT_IMPRECISE	0x400 /* Imprecise exception (XSCALE) */
#define FAULT_EXTERNAL	0x400 /* External abort (armv6+) */
#define FAULT_WNR	0x800 /* Write-not-Read access (armv6+) */
#else /* __ARM_ARCH < 6 */
#define FAULT_ALIGN		0x001 /* Alignment Fault */
#define FAULT_DEBUG		0x002 /* Debug Event */
#define FAULT_ACCESS_L1		0x003 /* Access Bit (L1) */
#define FAULT_ICACHE		0x004 /* Instruction cache maintenance */
#define FAULT_TRAN_L1		0x005 /* Translation Fault (L1) */
#define FAULT_ACCESS_L2		0x006 /* Access Bit (L2) */
#define FAULT_TRAN_L2		0x007 /* Translation Fault (L2) */
#define FAULT_EA_PREC		0x008 /* External Abort */
#define FAULT_DOMAIN_L1		0x009 /* Domain Fault (L1) */
#define FAULT_DOMAIN_L2		0x00B /* Domain Fault (L2) */
#define FAULT_EA_TRAN_L1	0x00C /* External Translation Abort (L1) */
#define FAULT_PERM_L1		0x00D /* Permission Fault (L1) */
#define FAULT_EA_TRAN_L2	0x00E /* External Translation Abort (L2) */
#define FAULT_PERM_L2		0x00F /* Permission Fault (L2) */
#define FAULT_TLB_CONFLICT	0x010 /* TLB Conflict Abort */
#define FAULT_EA_IMPREC		0x016 /* Asynchronous External Abort */
#define FAULT_PE_IMPREC		0x018 /* Asynchronous Parity Error */
#define FAULT_PARITY		0x019 /* Parity Error */
#define FAULT_PE_TRAN_L1	0x01C /* Parity Error on Translation (L1) */
#define FAULT_PE_TRAN_L2	0x01E /* Parity Error on Translation (L2) */

#define FSR_TO_FAULT(fsr)	(((fsr) & 0xF) | \
				 ((((fsr) & (1 << 10)) >> (10 - 4))))
#define FSR_LPAE		(1 <<  9) /* LPAE indicator */
#define FSR_WNR			(1 << 11) /* Write-not-Read access */
#define FSR_EXT			(1 << 12) /* DECERR/SLVERR for external */
#define FSR_CM			(1 << 13) /* Cache maintenance fault */
#endif /* !__ARM_ARCH < 6 */

/*
 * Address of the vector page, low and high versions.
 */
#ifndef __ASSEMBLER__
#define ARM_VECTORS_LOW		0x00000000U
#define ARM_VECTORS_HIGH	0xffff0000U
#else
#define ARM_VECTORS_LOW		0
#define ARM_VECTORS_HIGH	0xffff0000
#endif

/*
 * ARM Instructions
 *
 *       3 3 2 2 2
 *       1 0 9 8 7                                                     0
 *      +-------+-------------------------------------------------------+
 *      |  cond | instruction dependent                                 |
 *      |c c c c|                                                       |
 *      +-------+-------------------------------------------------------+
 */

#define INSN_SIZE	4		/* Always 4 bytes */
#define INSN_COND_MASK	0xf0000000	/* Condition mask */
#define INSN_COND_AL	0xe0000000	/* Always condition */

+/* ARM register defines */
+#define ARM_REG_SIZE		4
+#define ARM_REG_NUM_PC		15
+#define ARM_REG_NUM_LR		14
+#define ARM_REG_NUM_SP		13
+
#define THUMB_INSN_SIZE	2		/* Some are 4 bytes.  */

#endif /* !MACHINE_ARMREG_H */
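The new ARM_REG_NUM_* constants let debugger code address trapframe registers by ARM register number (r13/r14/r15). A hypothetical accessor, for illustration only; the tf_* field names follow the db_regs[] table earlier in this diff:

/*
 * Fetch register `n' (0..15) from a trapframe.  Illustrative only;
 * assumes tf_r0..tf_r12 are laid out consecutively in the frame.
 */
static register_t
tf_get_reg(struct trapframe *tf, u_int n)
{

	switch (n) {
	case ARM_REG_NUM_PC:
		return (tf->tf_pc);
	case ARM_REG_NUM_LR:
		return (tf->tf_usr_lr);
	case ARM_REG_NUM_SP:
		return (tf->tf_usr_sp);
	default:
		return ((&tf->tf_r0)[n]);
	}
}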
Index: head/sys/arm/include/db_machdep.h
===================================================================
--- head/sys/arm/include/db_machdep.h	(revision 290272)
+++ head/sys/arm/include/db_machdep.h	(revision 290273)
@@ -1,98 +1,98 @@
/*-
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * from: FreeBSD: src/sys/i386/include/db_machdep.h,v 1.16 1999/10/04
 * $FreeBSD$
 */

#ifndef _MACHINE_DB_MACHDEP_H_
#define _MACHINE_DB_MACHDEP_H_

#include
#include
#include

#define T_BREAKPOINT	(1)

typedef vm_offset_t	db_addr_t;
typedef int		db_expr_t;

#define PC_REGS()	((db_addr_t)kdb_thrctx->pcb_regs.sf_pc)

#define BKPT_INST	(KERNEL_BREAKPOINT)
#define BKPT_SIZE	(INSN_SIZE)
#define BKPT_SET(inst)	(BKPT_INST)

#define BKPT_SKIP do {				\
	kdb_frame->tf_pc += BKPT_SIZE;		\
} while (0)

#define SOFTWARE_SSTEP	1

#define IS_BREAKPOINT_TRAP(type, code)	(type == T_BREAKPOINT)
#define IS_WATCHPOINT_TRAP(type, code)	(0)

#define inst_trap_return(ins)	(0)
/* ldmxx reg, {..., pc}
					    01800000  stack mode
					    000f0000  register
					    0000ffff  register list */
/* mov pc, reg
					    0000000f  register */
#define inst_return(ins)	(((ins) & 0x0e108000) == 0x08108000 || \
				 ((ins) & 0x0ff0fff0) == 0x01a0f000 || \
				 ((ins) & 0x0ffffff0) == 0x012fff10) /* bx */
/* bl ...
					    00ffffff  offset>>2 */
#define inst_call(ins)		(((ins) & 0x0f000000) == 0x0b000000)
/* b ...
					    00ffffff  offset>>2 */
/* ldr pc, [pc, reg, lsl #2]
					    0000000f  register */
#define inst_branch(ins)	(((ins) & 0x0f000000) == 0x0a000000 || \
				 ((ins) & 0x0fdffff0) == 0x079ff100 || \
-				 ((ins) & 0x0cf0f000) == 0x0490f000 || \
+				 ((ins) & 0x0cd0f000) == 0x0490f000 || \
				 ((ins) & 0x0ffffff0) == 0x012fff30 || /* blx */ \
				 ((ins) & 0x0de0f000) == 0x0080f000)
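A quick smoke test of these encodings (illustrative only): 0xeb000000 is BL, 0xea000000 is B, and 0xe1a0f00e is "mov pc, lr":

#include <assert.h>

/* Classify three well-known A32 opcodes using the macros above. */
static void
inst_macros_smoke_test(void)
{

	assert(inst_call(0xeb000000));		/* bl: matches inst_call */
	assert(!inst_call(0xea000000));		/* b is not a call */
	assert(inst_branch(0xea000000));	/* b: first inst_branch clause */
	assert(inst_return(0xe1a0f00e));	/* mov pc, lr */
}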
#define inst_load(ins)		(0)
#define inst_store(ins)		(0)

#define next_instr_address(pc, bd)	((bd) ? (pc) : ((pc) + INSN_SIZE))

#define DB_SMALL_VALUE_MAX	(0x7fffffff)
#define DB_SMALL_VALUE_MIN	(-0x40001)

#define DB_ELFSIZE	32

int db_validate_address(vm_offset_t);
-u_int branch_taken (u_int insn, u_int pc);
+u_int branch_taken (u_int insn, db_addr_t pc);

#ifdef __ARMEB__
#define BYTE_MSF	(1)
#endif

#endif /* !_MACHINE_DB_MACHDEP_H_ */

Index: head/sys/arm/include/machdep.h
===================================================================
--- head/sys/arm/include/machdep.h	(revision 290272)
+++ head/sys/arm/include/machdep.h	(revision 290273)
@@ -1,46 +1,49 @@
/* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */
/* $FreeBSD$ */

#ifndef _MACHDEP_BOOT_MACHDEP_H_
#define _MACHDEP_BOOT_MACHDEP_H_

/* Structs that need to be initialised by initarm */
#ifdef ARM_NEW_PMAP
extern vm_offset_t irqstack;
extern vm_offset_t undstack;
extern vm_offset_t abtstack;
#else
struct pv_addr;
extern struct pv_addr irqstack;
extern struct pv_addr undstack;
extern struct pv_addr abtstack;
#endif

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

/* misc prototypes used by the many arm machdeps */
struct trapframe;
void arm_lock_cache_line(vm_offset_t);
void init_proc0(vm_offset_t kstack);
void halt(void);
void abort_handler(struct trapframe *, int);
void set_stackptrs(int cpu);
void undefinedinstruction_bounce(struct trapframe *);

/* Early boot related helper functions */
struct arm_boot_params;
vm_offset_t default_parse_boot_param(struct arm_boot_params *abp);
vm_offset_t freebsd_parse_boot_param(struct arm_boot_params *abp);
vm_offset_t linux_parse_boot_param(struct arm_boot_params *abp);
vm_offset_t fake_preload_metadata(struct arm_boot_params *abp);
vm_offset_t parse_boot_param(struct arm_boot_params *abp);
void arm_generic_initclocks(void);

/* Board-specific attributes */
void board_set_serial(uint64_t);
void board_set_revision(uint32_t);

+int arm_predict_branch(void *, u_int, register_t, register_t *,
+    u_int (*)(void*, int), u_int (*)(void*, vm_offset_t, u_int*));
+
#endif /* !_MACHDEP_BOOT_MACHDEP_H_ */
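The new arm_predict_branch() prototype is the hook behind SOFTWARE_SSTEP above: with no hardware single-step in use, the debugger computes every possible successor of the current instruction and plants temporary breakpoints there. A hedged sketch of that control flow (ss_store_bkpt() is a stand-in name, not a real API); the two planted breakpoints are also why proc.h below grows *_alt fields:

/*
 * Hypothetical single-step driver: plant temporary breakpoints at every
 * possible successor of the instruction at tf_pc.  ss_store_bkpt() is a
 * stand-in for code that saves the original word and writes BKPT_INST.
 */
static void
single_step_sketch(struct trapframe *tf)
{
	u_int insn = *(u_int *)tf->tf_pc;

	/* Fall-through successor; a conditional branch may not be taken. */
	ss_store_bkpt(next_instr_address(tf->tf_pc, 0));
	/* Taken successor, for anything that can redirect the pc. */
	if (inst_branch(insn) || inst_call(insn) || inst_return(insn))
		ss_store_bkpt(branch_taken(insn, tf->tf_pc));
}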
Index: head/sys/arm/include/proc.h
===================================================================
--- head/sys/arm/include/proc.h	(revision 290272)
+++ head/sys/arm/include/proc.h	(revision 290273)
@@ -1,83 +1,85 @@
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)proc.h	7.1 (Berkeley) 5/15/91
 * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29
 * $FreeBSD$
 */

#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_

#include

struct md_utrap {
	utrap_entry_t *ut_precise[UT_MAX];	/* must be first */
	int	ut_refcnt;
};

struct mdthread {
	int	md_spinlock_count;	/* (k) */
	register_t md_saved_cspr;	/* (k) */
	register_t md_spurflt_addr;	/* (k) Spurious page fault address. */
	int	md_ptrace_instr;
	int	md_ptrace_addr;
+	int	md_ptrace_instr_alt;
+	int	md_ptrace_addr_alt;
	register_t md_tp;
	void	*md_ras_start;
	void	*md_ras_end;
};

struct mdproc {
	struct md_utrap	*md_utrap;
	void	*md_sigtramp;
};

#define KINFO_PROC_SIZE 816
#define MAXARGS	8

/*
 * This holds the syscall state for a single system call.
 * As some syscall arguments may be 64-bit aligned we need to ensure the
 * args value is 64-bit aligned. The ABI will then ensure any 64-bit
 * arguments are already correctly aligned, even if they were passed in
 * via registers; we just need to make sure we copy them to an aligned
 * buffer.
 */
struct syscall_args {
	u_int code;
	struct sysent *callp;
	register_t args[MAXARGS];
	int narg;
	u_int nap;
} __aligned(8);

#endif /* !_MACHINE_PROC_H_ */
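Why two slots: single-stepping a conditional branch requires breakpoints at both the taken and fall-through successors, and each overwritten instruction must be remembered for restoration. A purely hypothetical illustration (ptrace_read_int()/ptrace_write_int() and PTRACE_BKPT_INSN are assumed names, not APIs introduced by this change):

/*
 * Hypothetical: remember and overwrite both successors of the current
 * instruction, using the new *_alt fields for the second breakpoint.
 */
static int
ptrace_plant_bkpts(struct thread *td, vm_offset_t taken, vm_offset_t flow)
{
	struct mdthread *md = &td->td_md;

	md->md_ptrace_addr = taken;
	ptrace_read_int(td, taken, &md->md_ptrace_instr);
	ptrace_write_int(td, taken, PTRACE_BKPT_INSN);

	md->md_ptrace_addr_alt = flow;
	if (flow != taken) {
		ptrace_read_int(td, flow, &md->md_ptrace_instr_alt);
		ptrace_write_int(td, flow, PTRACE_BKPT_INSN);
	}
	return (0);
}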