Index: head/sys/arm/arm/db_trace.c =================================================================== --- head/sys/arm/arm/db_trace.c (revision 294739) +++ head/sys/arm/arm/db_trace.c (revision 294740) @@ -1,183 +1,188 @@ /* $NetBSD: db_trace.c,v 1.8 2003/01/17 22:28:48 thorpej Exp $ */ /*- * Copyright (c) 2000, 2001 Ben Harris * Copyright (c) 1996 Scott K. Stevens * * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ +#include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include static void db_stack_trace_cmd(struct unwind_state *state) { const char *name; db_expr_t value; db_expr_t offset; c_db_sym_t sym; u_int reg, i; char *sep; uint16_t upd_mask; bool finished; finished = false; while (!finished) { finished = unwind_stack_one(state, 1); /* Print the frame details */ sym = db_search_symbol(state->start_pc, DB_STGY_ANY, &offset); if (sym == C_DB_SYM_NULL) { value = 0; name = "(null)"; } else db_symbol_values(sym, &name, &value); db_printf("%s() at ", name); db_printsym(state->start_pc, DB_STGY_PROC); db_printf("\n"); db_printf("\t pc = 0x%08x lr = 0x%08x (", state->start_pc, state->registers[LR]); db_printsym(state->registers[LR], DB_STGY_PROC); db_printf(")\n"); db_printf("\t sp = 0x%08x fp = 0x%08x", state->registers[SP], state->registers[FP]); /* Don't print the registers we have already printed */ upd_mask = state->update_mask & ~((1 << SP) | (1 << FP) | (1 << LR) | (1 << PC)); sep = "\n\t"; for (i = 0, reg = 0; upd_mask != 0; upd_mask >>= 1, reg++) { if ((upd_mask & 1) != 0) { db_printf("%s%sr%d = 0x%08x", sep, (reg < 10) ? " " : "", reg, state->registers[reg]); i++; if (i == 2) { sep = "\n\t"; i = 0; } else sep = " "; } } db_printf("\n"); if (finished) break; /* * Stop if directed to do so, or if we've unwound back to the * kernel entry point, or if the unwind function didn't change * anything (to avoid getting stuck in this loop forever). * If the latter happens, it's an indication that the unwind * information is incorrect somehow for the function named in * the last frame printed before you see the unwind failure * message (maybe it needs a STOP_UNWINDING). 
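For readers following the register dump in db_stack_trace_cmd() above: it walks update_mask one bit at a time and packs two registers per output line. The same idiom in isolation, as a minimal user-space sketch (names are illustrative, not from the tree):

	#include <stdio.h>
	#include <stdint.h>

	/* Print "rN = value" for each set mask bit, two per line. */
	static void
	print_masked_regs(uint16_t mask, const uint32_t *regs)
	{
		const char *sep = "\n\t";
		unsigned int reg, n = 0;

		for (reg = 0; mask != 0; mask >>= 1, reg++) {
			if ((mask & 1) == 0)
				continue;
			printf("%s%sr%u = 0x%08x", sep,
			    (reg < 10) ? " " : "", reg, regs[reg]);
			/* Break the line after every second register. */
			sep = (++n % 2 == 0) ? "\n\t" : " ";
		}
		printf("\n");
	}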
*/ if (state->registers[PC] < VM_MIN_KERNEL_ADDRESS) { db_printf("Unable to unwind into user mode\n"); finished = true; } else if (state->update_mask == 0) { db_printf("Unwind failure (no registers changed)\n"); finished = true; } } } -/* XXX stubs */ void db_md_list_watchpoints() { + + dbg_show_watchpoint(); } int db_md_clr_watchpoint(db_expr_t addr, db_expr_t size) { - return (0); + + return (dbg_remove_watchpoint(addr, size)); } int db_md_set_watchpoint(db_expr_t addr, db_expr_t size) { - return (0); + + return (dbg_setup_watchpoint(addr, size, HW_WATCHPOINT_RW)); } int db_trace_thread(struct thread *thr, int count) { struct unwind_state state; struct pcb *ctx; if (thr != curthread) { ctx = kdb_thr_ctx(thr); state.registers[FP] = ctx->pcb_regs.sf_r11; state.registers[SP] = ctx->pcb_regs.sf_sp; state.registers[LR] = ctx->pcb_regs.sf_lr; state.registers[PC] = ctx->pcb_regs.sf_pc; db_stack_trace_cmd(&state); } else db_trace_self(); return (0); } void db_trace_self(void) { struct unwind_state state; uint32_t sp; /* Read the stack pointer */ __asm __volatile("mov %0, sp" : "=&r" (sp)); state.registers[FP] = (uint32_t)__builtin_frame_address(0); state.registers[SP] = sp; state.registers[LR] = (uint32_t)__builtin_return_address(0); state.registers[PC] = (uint32_t)db_trace_self; db_stack_trace_cmd(&state); } Index: head/sys/arm/arm/debug_monitor.c =================================================================== --- head/sys/arm/arm/debug_monitor.c (nonexistent) +++ head/sys/arm/arm/debug_monitor.c (revision 294740) @@ -0,0 +1,943 @@ +/* + * Copyright (c) 2015 Juniper Networks Inc. + * All rights reserved. + * + * Developed by Semihalf. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
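With the old XXX stubs above replaced, ddb's machine-dependent watchpoint hooks finally reach real hardware. A hedged sketch of a caller of the new API (a 4-byte read/write watchpoint; the dbg_capable check and per-CPU debug reset happen inside dbg_setup_xpoint()):

	/* Arm a 4-byte RW hardware watchpoint on addr; returns 0 on success. */
	static int
	watch_word(db_expr_t addr)
	{
		return (dbg_setup_watchpoint(addr, 4, HW_WATCHPOINT_RW));
	}

ddb reaches the same path through db_md_set_watchpoint(), and the "show watches" output comes from dbg_show_watchpoint().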
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_ddb.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +enum dbg_t { + DBG_TYPE_BREAKPOINT = 0, + DBG_TYPE_WATCHPOINT = 1, +}; + +struct dbg_wb_conf { + enum dbg_t type; + enum dbg_access_t access; + db_addr_t address; + db_expr_t size; + u_int slot; +}; + +static int dbg_reset_state(void); +static int dbg_setup_breakpoint(db_expr_t, db_expr_t, u_int); +static int dbg_remove_breakpoint(u_int); +static u_int dbg_find_slot(enum dbg_t, db_expr_t); +static boolean_t dbg_check_slot_free(enum dbg_t, u_int); + +static int dbg_remove_xpoint(struct dbg_wb_conf *); +static int dbg_setup_xpoint(struct dbg_wb_conf *); + +static boolean_t dbg_capable; /* Indicates that machine is capable of using + HW watchpoints/breakpoints */ +static boolean_t dbg_ready[MAXCPU]; /* Debug arch. reset performed on this CPU */ + +static uint32_t dbg_model; /* Debug Arch. Model */ +static boolean_t dbg_ossr; /* OS Save and Restore implemented */ + +static uint32_t dbg_watchpoint_num; +static uint32_t dbg_breakpoint_num; + +static int dbg_ref_count_mme[MAXCPU]; /* Times monitor mode was enabled */ + +/* ID_DFR0 - Debug Feature Register 0 */ +#define ID_DFR0_CP_DEBUG_M_SHIFT 0 +#define ID_DFR0_CP_DEBUG_M_MASK (0xF << ID_DFR0_CP_DEBUG_M_SHIFT) +#define ID_DFR0_CP_DEBUG_M_NS (0x0) /* Not supported */ +#define ID_DFR0_CP_DEBUG_M_V6 (0x2) /* v6 Debug arch. CP14 access */ +#define ID_DFR0_CP_DEBUG_M_V6_1 (0x3) /* v6.1 Debug arch. CP14 access */ +#define ID_DFR0_CP_DEBUG_M_V7 (0x4) /* v7 Debug arch. CP14 access */ +#define ID_DFR0_CP_DEBUG_M_V7_1 (0x5) /* v7.1 Debug arch. CP14 access */ + +/* DBGDIDR - Debug ID Register */ +#define DBGDIDR_WRPS_SHIFT 28 +#define DBGDIDR_WRPS_MASK (0xF << DBGDIDR_WRPS_SHIFT) +#define DBGDIDR_WRPS_NUM(reg) \ + ((((reg) & DBGDIDR_WRPS_MASK) >> DBGDIDR_WRPS_SHIFT) + 1) + +#define DBGDIDR_BRPS_SHIFT 24 +#define DBGDIDR_BRPS_MASK (0xF << DBGDIDR_BRPS_SHIFT) +#define DBGDIDR_BRPS_NUM(reg) \ + ((((reg) & DBGDIDR_BRPS_MASK) >> DBGDIDR_BRPS_SHIFT) + 1) + +/* DBGPRSR - Device Powerdown and Reset Status Register */ +#define DBGPRSR_PU (1 << 0) /* Powerup status */ + +/* DBGOSLSR - OS Lock Status Register */ +#define DBGOSLSR_OSLM0 (1 << 0) + +/* DBGOSDLR - OS Double Lock Register */ +#define DBGPRSR_DLK (1 << 0) /* OS Double Lock set */ + +/* DBGDSCR - Debug Status and Control Register */ +#define DBGSCR_MDBG_EN (1 << 15) /* Monitor debug-mode enable */ + +/* DBGWVR - Watchpoint Value Register */ +#define DBGWVR_ADDR_MASK (~0x3U) + +/* Watchpoints/breakpoints control register bitfields */ +#define DBG_WB_CTRL_LEN_1 (0x1 << 5) +#define DBG_WB_CTRL_LEN_2 (0x3 << 5) +#define DBG_WB_CTRL_LEN_4 (0xf << 5) +#define DBG_WB_CTRL_LEN_8 (0xff << 5) +#define DBG_WB_CTRL_LEN_MASK(x) ((x) & (0xff << 5)) +#define DBG_WB_CTRL_EXEC (0x0 << 3) +#define DBG_WB_CTRL_LOAD (0x1 << 3) +#define DBG_WB_CTRL_STORE (0x2 << 3) +#define DBG_WB_CTRL_ACCESS_MASK(x) ((x) & (0x3 << 3)) + +/* Common for breakpoint and watchpoint */ +#define DBG_WB_CTRL_PL1 (0x1 << 1) +#define DBG_WB_CTRL_PL0 (0x2 << 1) +#define DBG_WB_CTRL_PLX_MASK(x) ((x) & (0x3 << 1)) +#define DBG_WB_CTRL_E (0x1 << 0) + +/* + * Watchpoint/breakpoint helpers + */ +#define DBG_BKPT_BT_SLOT 0 /* Slot for branch taken */ +#define DBG_BKPT_BNT_SLOT 1 /* Slot for branch not taken */ + +#define OP2_SHIFT 4 + +/* Opc2 numbers for coprocessor instructions */ +#define DBG_WB_BVR 4 +#define DBG_WB_BCR 5 +#define DBG_WB_WVR 6 
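These opc2 values are the key to the register-addressing scheme used throughout this file: the opc2 bank (BVR/BCR/WVR/WCR) is packed into the high bits of a token and the slot number rides in the low bits, so a single integer names one CP14 register. A tiny stand-alone demonstration of the decode, assuming only the constants defined here:

	#include <stdio.h>

	#define OP2_SHIFT	4
	#define DBG_WB_WVR	6

	int
	main(void)
	{
		int token = (DBG_WB_WVR << OP2_SHIFT) + 2;	/* DBGWVR2 */

		/* opc2 selects the bank; the low bits select the slot (CRm). */
		printf("mrc p14, 0, <val>, c0, c%d, %d\n",
		    token & ((1 << OP2_SHIFT) - 1), token >> OP2_SHIFT);
		return (0);
	}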
+#define DBG_WB_WCR	7
+
+#define DBG_REG_BASE_BVR	(DBG_WB_BVR << OP2_SHIFT)
+#define DBG_REG_BASE_BCR	(DBG_WB_BCR << OP2_SHIFT)
+#define DBG_REG_BASE_WVR	(DBG_WB_WVR << OP2_SHIFT)
+#define DBG_REG_BASE_WCR	(DBG_WB_WCR << OP2_SHIFT)
+
+#define DBG_WB_READ(cn, cm, op2, val) do {					\
+	__asm __volatile("mrc p14, 0, %0, " #cn "," #cm "," #op2 : "=r" (val));\
+} while (0)
+
+#define DBG_WB_WRITE(cn, cm, op2, val) do {					\
+	__asm __volatile("mcr p14, 0, %0, " #cn "," #cm "," #op2 :: "r" (val));\
+} while (0)
+
+#define READ_WB_REG_CASE(op2, m, val)		\
+	case (((op2) << OP2_SHIFT) + m):	\
+		DBG_WB_READ(c0, c ## m, op2, val);	\
+		break
+
+#define WRITE_WB_REG_CASE(op2, m, val)		\
+	case (((op2) << OP2_SHIFT) + m):	\
+		DBG_WB_WRITE(c0, c ## m, op2, val);	\
+		break
+
+#define SWITCH_CASES_READ_WB_REG(op2, val)	\
+	READ_WB_REG_CASE(op2, 0, val);		\
+	READ_WB_REG_CASE(op2, 1, val);		\
+	READ_WB_REG_CASE(op2, 2, val);		\
+	READ_WB_REG_CASE(op2, 3, val);		\
+	READ_WB_REG_CASE(op2, 4, val);		\
+	READ_WB_REG_CASE(op2, 5, val);		\
+	READ_WB_REG_CASE(op2, 6, val);		\
+	READ_WB_REG_CASE(op2, 7, val);		\
+	READ_WB_REG_CASE(op2, 8, val);		\
+	READ_WB_REG_CASE(op2, 9, val);		\
+	READ_WB_REG_CASE(op2, 10, val);		\
+	READ_WB_REG_CASE(op2, 11, val);		\
+	READ_WB_REG_CASE(op2, 12, val);		\
+	READ_WB_REG_CASE(op2, 13, val);		\
+	READ_WB_REG_CASE(op2, 14, val);		\
+	READ_WB_REG_CASE(op2, 15, val)
+
+#define SWITCH_CASES_WRITE_WB_REG(op2, val)	\
+	WRITE_WB_REG_CASE(op2, 0, val);		\
+	WRITE_WB_REG_CASE(op2, 1, val);		\
+	WRITE_WB_REG_CASE(op2, 2, val);		\
+	WRITE_WB_REG_CASE(op2, 3, val);		\
+	WRITE_WB_REG_CASE(op2, 4, val);		\
+	WRITE_WB_REG_CASE(op2, 5, val);		\
+	WRITE_WB_REG_CASE(op2, 6, val);		\
+	WRITE_WB_REG_CASE(op2, 7, val);		\
+	WRITE_WB_REG_CASE(op2, 8, val);		\
+	WRITE_WB_REG_CASE(op2, 9, val);		\
+	WRITE_WB_REG_CASE(op2, 10, val);	\
+	WRITE_WB_REG_CASE(op2, 11, val);	\
+	WRITE_WB_REG_CASE(op2, 12, val);	\
+	WRITE_WB_REG_CASE(op2, 13, val);	\
+	WRITE_WB_REG_CASE(op2, 14, val);	\
+	WRITE_WB_REG_CASE(op2, 15, val)
+
+static uint32_t
+dbg_wb_read_reg(int reg, int n)
+{
+	uint32_t val;
+
+	val = 0;
+
+	switch (reg + n) {
+	SWITCH_CASES_READ_WB_REG(DBG_WB_WVR, val);
+	SWITCH_CASES_READ_WB_REG(DBG_WB_WCR, val);
+	SWITCH_CASES_READ_WB_REG(DBG_WB_BVR, val);
+	SWITCH_CASES_READ_WB_REG(DBG_WB_BCR, val);
+	default:
+		db_printf(
+		    "trying to read from CP14 reg. using wrong opc2 %d\n",
+		    reg >> OP2_SHIFT);
+	}
+
+	return (val);
+}
+
+static void
+dbg_wb_write_reg(int reg, int n, uint32_t val)
+{
+
+	switch (reg + n) {
+	SWITCH_CASES_WRITE_WB_REG(DBG_WB_WVR, val);
+	SWITCH_CASES_WRITE_WB_REG(DBG_WB_WCR, val);
+	SWITCH_CASES_WRITE_WB_REG(DBG_WB_BVR, val);
+	SWITCH_CASES_WRITE_WB_REG(DBG_WB_BCR, val);
+	default:
+		db_printf(
+		    "trying to write to CP14 reg. using wrong opc2 %d\n",
+		    reg >> OP2_SHIFT);
+	}
+	isb();
+}
+
+boolean_t
+kdb_cpu_pc_is_singlestep(db_addr_t pc)
+{
+
+	if (dbg_find_slot(DBG_TYPE_BREAKPOINT, pc) != ~0U)
+		return (TRUE);
+
+	return (FALSE);
+}
+
+void
+kdb_cpu_set_singlestep(void)
+{
+	db_expr_t inst;
+	db_addr_t pc, brpc;
+	uint32_t wcr;
+	u_int i;
+
+	/*
+	 * Disable all watchpoints first; stepping over a watched instruction
+	 * would raise a breakpoint exception instead of the single-step
+	 * exception and would lock the CPU on that instruction forever.
+ */ + for (i = 0; i < dbg_watchpoint_num; i++) { + wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); + if ((wcr & DBG_WB_CTRL_E) != 0) { + dbg_wb_write_reg(DBG_REG_BASE_WCR, i, + (wcr & ~DBG_WB_CTRL_E)); + } + } + + pc = PC_REGS(); + + inst = db_get_value(pc, sizeof(pc), FALSE); + if (inst_branch(inst) || inst_call(inst) || inst_return(inst)) { + brpc = branch_taken(inst, pc); + dbg_setup_breakpoint(brpc, INSN_SIZE, DBG_BKPT_BT_SLOT); + } + pc = next_instr_address(pc, 0); + dbg_setup_breakpoint(pc, INSN_SIZE, DBG_BKPT_BNT_SLOT); +} + +void +kdb_cpu_clear_singlestep(void) +{ + uint32_t wvr, wcr; + u_int i; + + dbg_remove_breakpoint(DBG_BKPT_BT_SLOT); + dbg_remove_breakpoint(DBG_BKPT_BNT_SLOT); + + /* Restore all watchpoints */ + for (i = 0; i < dbg_watchpoint_num; i++) { + wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); + wvr = dbg_wb_read_reg(DBG_REG_BASE_WVR, i); + /* Watchpoint considered not empty if address value is not 0 */ + if ((wvr & DBGWVR_ADDR_MASK) != 0) { + dbg_wb_write_reg(DBG_REG_BASE_WCR, i, + (wcr | DBG_WB_CTRL_E)); + } + } +} + +int +dbg_setup_watchpoint(db_expr_t addr, db_expr_t size, enum dbg_access_t access) +{ + struct dbg_wb_conf conf; + + if (access == HW_BREAKPOINT_X) { + db_printf("Invalid access type for watchpoint: %d\n", access); + return (EINVAL); + } + + conf.address = addr; + conf.size = size; + conf.access = access; + conf.type = DBG_TYPE_WATCHPOINT; + + return (dbg_setup_xpoint(&conf)); +} + +int +dbg_remove_watchpoint(db_expr_t addr, db_expr_t size __unused) +{ + struct dbg_wb_conf conf; + + conf.address = addr; + conf.type = DBG_TYPE_WATCHPOINT; + + return (dbg_remove_xpoint(&conf)); +} + +static int +dbg_setup_breakpoint(db_expr_t addr, db_expr_t size, u_int slot) +{ + struct dbg_wb_conf conf; + + conf.address = addr; + conf.size = size; + conf.access = HW_BREAKPOINT_X; + conf.type = DBG_TYPE_BREAKPOINT; + conf.slot = slot; + + return (dbg_setup_xpoint(&conf)); +} + +static int +dbg_remove_breakpoint(u_int slot) +{ + struct dbg_wb_conf conf; + + /* Slot already cleared. 
Don't recurse */ + if (dbg_check_slot_free(DBG_TYPE_BREAKPOINT, slot)) + return (0); + + conf.slot = slot; + conf.type = DBG_TYPE_BREAKPOINT; + + return (dbg_remove_xpoint(&conf)); +} + +static const char * +dbg_watchtype_str(uint32_t type) +{ + + switch (type) { + case DBG_WB_CTRL_EXEC: + return ("execute"); + case DBG_WB_CTRL_STORE: + return ("write"); + case DBG_WB_CTRL_LOAD: + return ("read"); + case DBG_WB_CTRL_LOAD | DBG_WB_CTRL_STORE: + return ("read/write"); + default: + return ("invalid"); + } +} + +static int +dbg_watchtype_len(uint32_t len) +{ + + switch (len) { + case DBG_WB_CTRL_LEN_1: + return (1); + case DBG_WB_CTRL_LEN_2: + return (2); + case DBG_WB_CTRL_LEN_4: + return (4); + case DBG_WB_CTRL_LEN_8: + return (8); + default: + return (0); + } +} + +void +dbg_show_watchpoint(void) +{ + uint32_t wcr, len, type; + uint32_t addr; + boolean_t is_enabled; + int i; + + if (!dbg_capable) { + db_printf("Architecture does not support HW " + "breakpoints/watchpoints\n"); + return; + } + + db_printf("\nhardware watchpoints:\n"); + db_printf(" watch status type len address symbol\n"); + db_printf(" ----- -------- ---------- --- ---------- ------------------\n"); + for (i = 0; i < dbg_watchpoint_num; i++) { + wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); + if ((wcr & DBG_WB_CTRL_E) != 0) + is_enabled = TRUE; + else + is_enabled = FALSE; + + type = DBG_WB_CTRL_ACCESS_MASK(wcr); + len = DBG_WB_CTRL_LEN_MASK(wcr); + addr = dbg_wb_read_reg(DBG_REG_BASE_WVR, i) & DBGWVR_ADDR_MASK; + db_printf(" %-5d %-8s %10s %3d 0x%08x ", i, + is_enabled ? "enabled" : "disabled", + is_enabled ? dbg_watchtype_str(type) : "", + is_enabled ? dbg_watchtype_len(len) : 0, + addr); + db_printsym((db_addr_t)addr, DB_STGY_ANY); + db_printf("\n"); + } +} + +static boolean_t +dbg_check_slot_free(enum dbg_t type, u_int slot) +{ + uint32_t cr, vr; + uint32_t max; + + switch(type) { + case DBG_TYPE_BREAKPOINT: + max = dbg_breakpoint_num; + cr = DBG_REG_BASE_BCR; + vr = DBG_REG_BASE_BVR; + break; + case DBG_TYPE_WATCHPOINT: + max = dbg_watchpoint_num; + cr = DBG_REG_BASE_WCR; + vr = DBG_REG_BASE_WVR; + break; + default: + db_printf("%s: Unsupported event type %d\n", __func__, type); + return (FALSE); + } + + if (slot >= max) { + db_printf("%s: Invalid slot number %d, max %d\n", + __func__, slot, max - 1); + return (FALSE); + } + + if ((dbg_wb_read_reg(cr, slot) & DBG_WB_CTRL_E) == 0 && + (dbg_wb_read_reg(vr, slot) & DBGWVR_ADDR_MASK) == 0) + return (TRUE); + + return (FALSE); +} + +static u_int +dbg_find_free_slot(enum dbg_t type) +{ + u_int max, i; + + switch(type) { + case DBG_TYPE_BREAKPOINT: + max = dbg_breakpoint_num; + break; + case DBG_TYPE_WATCHPOINT: + max = dbg_watchpoint_num; + break; + default: + db_printf("Unsupported debug type\n"); + return (~0U); + } + + for (i = 0; i < max; i++) { + if (dbg_check_slot_free(type, i)) + return (i); + } + + return (~0U); +} + +static u_int +dbg_find_slot(enum dbg_t type, db_expr_t addr) +{ + uint32_t reg_addr, reg_ctrl; + u_int max, i; + + switch(type) { + case DBG_TYPE_BREAKPOINT: + max = dbg_breakpoint_num; + reg_addr = DBG_REG_BASE_BVR; + reg_ctrl = DBG_REG_BASE_BCR; + break; + case DBG_TYPE_WATCHPOINT: + max = dbg_watchpoint_num; + reg_addr = DBG_REG_BASE_WVR; + reg_ctrl = DBG_REG_BASE_WCR; + break; + default: + db_printf("Unsupported debug type\n"); + return (~0U); + } + + for (i = 0; i < max; i++) { + if ((dbg_wb_read_reg(reg_addr, i) == addr) && + ((dbg_wb_read_reg(reg_ctrl, i) & DBG_WB_CTRL_E) != 0)) + return (i); + } + + return (~0U); +} + +static __inline boolean_t 
+dbg_monitor_is_enabled(void)
+{
+
+	return ((cp14_dbgdscrint_get() & DBGSCR_MDBG_EN) != 0);
+}
+
+static int
+dbg_enable_monitor(void)
+{
+	uint32_t dbg_dscr;
+
+	/* Already enabled? Just increment reference counter and return */
+	if (dbg_monitor_is_enabled()) {
+		dbg_ref_count_mme[PCPU_GET(cpuid)]++;
+		return (0);
+	}
+
+	dbg_dscr = cp14_dbgdscrint_get();
+
+	switch (dbg_model) {
+	case ID_DFR0_CP_DEBUG_M_V6:
+	case ID_DFR0_CP_DEBUG_M_V6_1:	/* fall through */
+		cp14_dbgdscr_v6_set(dbg_dscr | DBGSCR_MDBG_EN);
+		break;
+	case ID_DFR0_CP_DEBUG_M_V7:	/* fall through */
+	case ID_DFR0_CP_DEBUG_M_V7_1:
+		cp14_dbgdscr_v7_set(dbg_dscr | DBGSCR_MDBG_EN);
+		break;
+	default:
+		break;
+	}
+	isb();
+
+	/* Verify that Monitor mode is set */
+	if (dbg_monitor_is_enabled()) {
+		dbg_ref_count_mme[PCPU_GET(cpuid)]++;
+		return (0);
+	}
+
+	return (ENXIO);
+}
+
+static int
+dbg_disable_monitor(void)
+{
+	uint32_t dbg_dscr;
+
+	if (!dbg_monitor_is_enabled())
+		return (0);
+
+	if (--dbg_ref_count_mme[PCPU_GET(cpuid)] > 0)
+		return (0);
+
+	dbg_dscr = cp14_dbgdscrint_get();
+	switch (dbg_model) {
+	case ID_DFR0_CP_DEBUG_M_V6:
+	case ID_DFR0_CP_DEBUG_M_V6_1:	/* fall through */
+		dbg_dscr &= ~DBGSCR_MDBG_EN;
+		cp14_dbgdscr_v6_set(dbg_dscr);
+		break;
+	case ID_DFR0_CP_DEBUG_M_V7:	/* fall through */
+	case ID_DFR0_CP_DEBUG_M_V7_1:
+		dbg_dscr &= ~DBGSCR_MDBG_EN;
+		cp14_dbgdscr_v7_set(dbg_dscr);
+		break;
+	default:
+		return (ENXIO);
+	}
+	isb();
+
+	return (0);
+}
+
+static int
+dbg_setup_xpoint(struct dbg_wb_conf *conf)
+{
+	const char *typestr;
+	uint32_t cr_size, cr_priv, cr_access;
+	uint32_t reg_ctrl, reg_addr, ctrl, addr;
+	boolean_t is_bkpt;
+	u_int cpuid;
+	u_int i;
+	int err;
+
+	if (!dbg_capable)
+		return (ENXIO);
+
+	is_bkpt = (conf->type == DBG_TYPE_BREAKPOINT);
+	typestr = is_bkpt ? "breakpoint" : "watchpoint";
+
+	cpuid = PCPU_GET(cpuid);
+	if (!dbg_ready[cpuid]) {
+		err = dbg_reset_state();
+		if (err != 0)
+			return (err);
+		dbg_ready[cpuid] = TRUE;
+	}
+
+	if (is_bkpt) {
+		if (dbg_breakpoint_num == 0) {
+			db_printf("Breakpoints not supported on this architecture\n");
+			return (ENXIO);
+		}
+		i = conf->slot;
+		if (!dbg_check_slot_free(DBG_TYPE_BREAKPOINT, i)) {
+			/*
+			 * This should never happen; if it does, something has
+			 * gone wrong elsewhere. Replacing the breakpoint is
+			 * still safe, but let the user know.
+			 */
+			db_printf("ERROR: Breakpoint already set.
Replacing...\n"); + } + } else { + i = dbg_find_free_slot(DBG_TYPE_WATCHPOINT); + if (i == ~0U) { + db_printf("Can not find slot for %s, max %d slots supported\n", + typestr, dbg_watchpoint_num); + return (ENXIO); + } + } + + /* Kernel access only */ + cr_priv = DBG_WB_CTRL_PL1; + + switch(conf->size) { + case 1: + cr_size = DBG_WB_CTRL_LEN_1; + break; + case 2: + cr_size = DBG_WB_CTRL_LEN_2; + break; + case 4: + cr_size = DBG_WB_CTRL_LEN_4; + break; + case 8: + cr_size = DBG_WB_CTRL_LEN_8; + break; + default: + db_printf("Unsupported address size for %s\n", typestr); + return (EINVAL); + } + + if (is_bkpt) { + cr_access = DBG_WB_CTRL_EXEC; + reg_ctrl = DBG_REG_BASE_BCR; + reg_addr = DBG_REG_BASE_BVR; + /* Always unlinked BKPT */ + ctrl = (cr_size | cr_access | cr_priv | DBG_WB_CTRL_E); + } else { + switch(conf->access) { + case HW_WATCHPOINT_R: + cr_access = DBG_WB_CTRL_LOAD; + break; + case HW_WATCHPOINT_W: + cr_access = DBG_WB_CTRL_STORE; + break; + case HW_WATCHPOINT_RW: + cr_access = DBG_WB_CTRL_LOAD | DBG_WB_CTRL_STORE; + break; + default: + db_printf("Unsupported exception level for %s\n", typestr); + return (EINVAL); + } + + reg_ctrl = DBG_REG_BASE_WCR; + reg_addr = DBG_REG_BASE_WVR; + ctrl = (cr_size | cr_access | cr_priv | DBG_WB_CTRL_E); + } + + addr = conf->address; + + dbg_wb_write_reg(reg_addr, i, addr); + dbg_wb_write_reg(reg_ctrl, i, ctrl); + + return (dbg_enable_monitor()); +} + +static int +dbg_remove_xpoint(struct dbg_wb_conf *conf) +{ + uint32_t reg_ctrl, reg_addr, addr; + u_int cpuid; + u_int i; + int err; + + if (!dbg_capable) + return (ENXIO); + + cpuid = PCPU_GET(cpuid); + if (!dbg_ready[cpuid]) { + err = dbg_reset_state(); + if (err != 0) + return (err); + dbg_ready[cpuid] = TRUE; + } + + addr = conf->address; + + if (conf->type == DBG_TYPE_BREAKPOINT) { + i = conf->slot; + reg_ctrl = DBG_REG_BASE_BCR; + reg_addr = DBG_REG_BASE_BVR; + } else { + i = dbg_find_slot(DBG_TYPE_WATCHPOINT, addr); + if (i == ~0U) { + db_printf("Can not find watchpoint for address 0%x\n", addr); + return (EINVAL); + } + reg_ctrl = DBG_REG_BASE_WCR; + reg_addr = DBG_REG_BASE_WVR; + } + + dbg_wb_write_reg(reg_ctrl, i, 0); + dbg_wb_write_reg(reg_addr, i, 0); + + return (dbg_disable_monitor()); +} + +static __inline uint32_t +dbg_get_debug_model(void) +{ + uint32_t dbg_m; + + dbg_m = ((cpuinfo.id_dfr0 & ID_DFR0_CP_DEBUG_M_MASK) >> + ID_DFR0_CP_DEBUG_M_SHIFT); + + return (dbg_m); +} + +static __inline boolean_t +dbg_get_ossr(void) +{ + + switch (dbg_model) { + case ID_DFR0_CP_DEBUG_M_V6_1: + if ((cp14_dbgoslsr_get() & DBGOSLSR_OSLM0) != 0) + return (TRUE); + + return (FALSE); + case ID_DFR0_CP_DEBUG_M_V7_1: + return (TRUE); + default: + return (FALSE); + } +} + +static __inline boolean_t +dbg_arch_supported(void) +{ + + switch (dbg_model) { + case ID_DFR0_CP_DEBUG_M_V6: + case ID_DFR0_CP_DEBUG_M_V6_1: + case ID_DFR0_CP_DEBUG_M_V7: + case ID_DFR0_CP_DEBUG_M_V7_1: /* fall through */ + return (TRUE); + default: + /* We only support valid v6.x/v7.x modes through CP14 */ + return (FALSE); + } +} + +static __inline uint32_t +dbg_get_wrp_num(void) +{ + uint32_t dbg_didr; + + dbg_didr = cp14_dbgdidr_get(); + + return (DBGDIDR_WRPS_NUM(dbg_didr)); +} + +static __inline uint32_t +dgb_get_brp_num(void) +{ + uint32_t dbg_didr; + + dbg_didr = cp14_dbgdidr_get(); + + return (DBGDIDR_BRPS_NUM(dbg_didr)); +} + +static int +dbg_reset_state(void) +{ + u_int cpuid; + size_t i; + int err; + + cpuid = PCPU_GET(cpuid); + err = 0; + + switch (dbg_model) { + case ID_DFR0_CP_DEBUG_M_V6: + /* v6 Debug logic reset upon 
power-up */ + return (0); + case ID_DFR0_CP_DEBUG_M_V6_1: + /* Is core power domain powered up? */ + if ((cp14_dbgprsr_get() & DBGPRSR_PU) == 0) + err = ENXIO; + + if (err != 0) + break; + + if (dbg_ossr) + goto vectr_clr; + break; + case ID_DFR0_CP_DEBUG_M_V7: + break; + case ID_DFR0_CP_DEBUG_M_V7_1: + /* Is double lock set? */ + if ((cp14_dbgosdlr_get() & DBGPRSR_DLK) != 0) + err = ENXIO; + + break; + default: + break; + } + + if (err != 0) { + db_printf("Debug facility locked (CPU%d)\n", cpuid); + return (err); + } + + /* + * DBGOSLAR is always implemented for v7.1 Debug Arch. however is + * optional for v7 (depends on OS save and restore support). + */ + if (((dbg_model & ID_DFR0_CP_DEBUG_M_V7_1) != 0) || dbg_ossr) { + /* + * Clear OS lock. + * Writing any other value than 0xC5ACCESS will unlock. + */ + cp14_dbgoslar_set(0); + isb(); + } + +vectr_clr: + /* + * After reset we must ensure that DBGVCR has a defined value. + * Disable all vector catch events. Safe to use - required in all + * implementations. + */ + cp14_dbgvcr_set(0); + isb(); + + /* + * We have limited number of {watch,break}points, each consists of + * two registers: + * - wcr/bcr regsiter configurates corresponding {watch,break}point + * behaviour + * - wvr/bvr register keeps address we are hunting for + * + * Reset all breakpoints and watchpoints. + */ + for (i = 0; i < dbg_watchpoint_num; ++i) { + dbg_wb_write_reg(DBG_REG_BASE_WCR, i, 0); + dbg_wb_write_reg(DBG_REG_BASE_WVR, i, 0); + } + + for (i = 0; i < dbg_breakpoint_num; ++i) { + dbg_wb_write_reg(DBG_REG_BASE_BCR, i, 0); + dbg_wb_write_reg(DBG_REG_BASE_BVR, i, 0); + } + + return (0); +} + +void +dbg_monitor_init(void) +{ + int err; + + /* Fetch ARM Debug Architecture model */ + dbg_model = dbg_get_debug_model(); + + if (!dbg_arch_supported()) { + db_printf("ARM Debug Architecture not supported\n"); + return; + } + + if (bootverbose) { + db_printf("ARM Debug Architecture %s\n", + (dbg_model == ID_DFR0_CP_DEBUG_M_V6) ? "v6" : + (dbg_model == ID_DFR0_CP_DEBUG_M_V6_1) ? "v6.1" : + (dbg_model == ID_DFR0_CP_DEBUG_M_V7) ? "v7" : + (dbg_model == ID_DFR0_CP_DEBUG_M_V7_1) ? "v7.1" : "unknown"); + } + + /* Do we have OS Save and Restore mechanism? */ + dbg_ossr = dbg_get_ossr(); + + /* Find out many breakpoints and watchpoints we can use */ + dbg_watchpoint_num = dbg_get_wrp_num(); + dbg_breakpoint_num = dgb_get_brp_num(); + + if (bootverbose) { + db_printf("%d watchpoints and %d breakpoints supported\n", + dbg_watchpoint_num, dbg_breakpoint_num); + } + + err = dbg_reset_state(); + if (err == 0) { + dbg_capable = TRUE; + return; + } + + db_printf("HW Breakpoints/Watchpoints not enabled on CPU%d\n", + PCPU_GET(cpuid)); +} Property changes on: head/sys/arm/arm/debug_monitor.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/arm/arm/machdep.c =================================================================== --- head/sys/arm/arm/machdep.c (revision 294739) +++ head/sys/arm/arm/machdep.c (revision 294740) @@ -1,1917 +1,1920 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. 
* * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Machine dependant functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include "opt_timer.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DDB #include #if __ARM_ARCH >= 6 #include DB_SHOW_COMMAND(cp15, db_show_cp15) { u_int reg; reg = cp15_midr_get(); db_printf("Cpu ID: 0x%08x\n", reg); reg = cp15_ctr_get(); db_printf("Current Cache Lvl ID: 0x%08x\n",reg); reg = cp15_sctlr_get(); db_printf("Ctrl: 0x%08x\n",reg); reg = cp15_actlr_get(); db_printf("Aux Ctrl: 0x%08x\n",reg); reg = cp15_id_pfr0_get(); db_printf("Processor Feat 0: 0x%08x\n", reg); reg = cp15_id_pfr1_get(); db_printf("Processor Feat 1: 0x%08x\n", reg); reg = cp15_id_dfr0_get(); db_printf("Debug Feat 0: 0x%08x\n", reg); reg = cp15_id_afr0_get(); db_printf("Auxiliary Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr0_get(); db_printf("Memory Model Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr1_get(); db_printf("Memory Model Feat 1: 0x%08x\n", reg); reg = cp15_id_mmfr2_get(); db_printf("Memory Model Feat 2: 0x%08x\n", reg); reg = cp15_id_mmfr3_get(); db_printf("Memory Model Feat 3: 0x%08x\n", reg); reg = cp15_ttbr_get(); db_printf("TTB0: 0x%08x\n", reg); 
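	/*
	 * A note on the accessors above: each cp15_*_get() is assumed to be
	 * a one-instruction inline MRC wrapper, e.g. for the main ID
	 * register (encoding shown for illustration only):
	 *
	 *	uint32_t val;
	 *	__asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (val));
	 */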
} DB_SHOW_COMMAND(vtop, db_show_vtop) { u_int reg; if (have_addr) { cp15_ats1cpr_set(addr); reg = cp15_par_get(); db_printf("Physical address reg: 0x%08x\n",reg); } else db_printf("show vtop \n"); } #endif /* __ARM_ARCH >= 6 */ #endif /* DDB */ #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT static char *loader_envp; vm_paddr_t pmap_pa; #ifdef ARM_NEW_PMAP vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; #else /* * This is the number of L2 page tables required for covering max * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, * stacks etc.), uprounded to be divisible by 4. */ #define KERNEL_PT_MAX 78 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; struct pv_addr systempage; static struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; static struct pv_addr kernelstack; #endif #endif #if defined(LINUX_BOOT_ABI) #define LBABI_MAX_BANKS 10 uint32_t board_id; struct arm_lbabi_tag *atag_list; char linux_command_line[LBABI_MAX_COMMAND_LINE + 1]; char atags[LBABI_MAX_COMMAND_LINE * 2]; uint32_t memstart[LBABI_MAX_BANKS]; uint32_t memsize[LBABI_MAX_BANKS]; uint32_t membanks; #endif static uint32_t board_revision; /* hex representation of uint64_t */ static char board_serial[32]; SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes"); SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD, &board_revision, 0, "Board revision"); SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD, board_serial, 0, "Board serial"); int vfp_exists; SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, &vfp_exists, 0, "Floating point support enabled"); void board_set_serial(uint64_t serial) { snprintf(board_serial, sizeof(board_serial)-1, "%016jx", serial); } void board_set_revision(uint32_t revision) { board_revision = revision; } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. */ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. */ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; if (va == ARM_VECTORS_HIGH) { /* * Assume the MD caller knows what it's doing here, and * really does want the vector page relocated. * * Note: This has to be done here (and not just in * cpu_setup()) because the vector page needs to be * accessible *before* cpu_startup() is called. * Think ddb(9) ... * * NOTE: If the CPU control register is not readable, * this will totally fail! We'll just assume that * any system that has high vector support has a * readable CPU control register, for now. If we * ever encounter one that does not, we'll have to * rethink this. */ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); } } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; #ifdef ARM_TP_ADDRESS #ifndef ARM_CACHE_LOCK_ENABLE vm_page_t m; #endif #endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. 
*/ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_cnt.v_free_count), (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); if (bootverbose) { arm_physmem_print_tables(); arm_devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(pmap_kernel(), pcb); #ifndef ARM_NEW_PMAP vector_page_setprot(VM_PROT_READ); pmap_postinit(); #endif #ifdef ARM_TP_ADDRESS #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); #else m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); #endif *(uint32_t *)ARM_RAS_START = 0; *(uint32_t *)ARM_RAS_END = 0xffffffff; #endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { cpu_dcache_wb_range((uintptr_t)ptr, len); #ifdef ARM_L2_PIPT cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len); #else cpu_l2cache_wb_range((uintptr_t)ptr, len); #endif } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { return (ENXIO); } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); #ifndef NO_EVENTTIMERS if (!busy) cpu_idleclock(); #endif if (!sched_runnable()) cpu_sleep(0); #ifndef NO_EVENTTIMERS if (!busy) cpu_activeclock(); #endif spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } /* * Most ARM platforms don't need to do anything special to init their clocks * (they get intialized during normal device attachment), and by not defining a * cpu_initclocks() function they get this generic one. Any platform that needs * to do something special can just provide their own implementation, which will * override this one due to the weak linkage. 
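Since the generic implementation is published only through the __weak_reference() just below, a platform overrides it simply by defining a strong symbol of the same name. A minimal sketch, where the timer hook is a hypothetical platform helper:

	extern void my_soc_timer_init(void);	/* hypothetical platform helper */

	/* Strong definition; beats the weak alias to arm_generic_initclocks(). */
	void
	cpu_initclocks(void)
	{
		my_soc_timer_init();
	}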
*/ void arm_generic_initclocks(void) { #ifndef NO_EVENTTIMERS #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif #endif } __weak_reference(arm_generic_initclocks, cpu_initclocks); int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(&tf->tf_r0, regs->r, sizeof(regs->r)); regs->r_sp = tf->tf_usr_sp; regs->r_lr = tf->tf_usr_lr; regs->r_pc = tf->tf_pc; regs->r_cpsr = tf->tf_spsr; return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { bzero(regs, sizeof(*regs)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(regs->r, &tf->tf_r0, sizeof(regs->r)); tf->tf_usr_sp = regs->r_sp; tf->tf_usr_lr = regs->r_lr; tf->tf_pc = regs->r_pc; tf->tf_spsr &= ~PSR_FLAGS; tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS; return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { return (0); } int set_dbregs(struct thread *td, struct dbreg *regs) { return (0); } static int ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v) { if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v)) return (ENOMEM); return (0); } static int ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v) { if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v)) return (ENOMEM); return (0); } static u_int ptrace_get_usr_reg(void *cookie, int reg) { int ret; struct thread *td = cookie; KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)), ("reg is outside range")); switch(reg) { case ARM_REG_NUM_PC: ret = td->td_frame->tf_pc; break; case ARM_REG_NUM_LR: ret = td->td_frame->tf_usr_lr; break; case ARM_REG_NUM_SP: ret = td->td_frame->tf_usr_sp; break; default: ret = *((register_t*)&td->td_frame->tf_r0 + reg); break; } return (ret); } static u_int ptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val) { struct thread *td = cookie; u_int error; error = ptrace_read_int(td, offset, val); return (error); } /** * This function parses current instruction opcode and decodes * any possible jump (change in PC) which might occur after * the instruction is executed. * * @param td Thread structure of analysed task * @param cur_instr Currently executed instruction * @param alt_next_address Pointer to the variable where * the destination address of the * jump instruction shall be stored. 
* * @return <0> when jump is possible * otherwise */ static int ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr, uint32_t *alt_next_address) { int error; if (inst_branch(cur_instr) || inst_call(cur_instr) || inst_return(cur_instr)) { error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc, alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int); return (error); } return (EINVAL); } int ptrace_single_step(struct thread *td) { struct proc *p; int error, error_alt; uint32_t cur_instr, alt_next = 0; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); KASSERT(td->td_md.md_ptrace_instr == 0, ("Didn't clear single step")); KASSERT(td->td_md.md_ptrace_instr_alt == 0, ("Didn't clear alternative single step")); p = td->td_proc; PROC_UNLOCK(p); error = ptrace_read_int(td, td->td_frame->tf_pc, &cur_instr); if (error) goto out; error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE, &td->td_md.md_ptrace_instr); if (error == 0) { error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE, PTRACE_BREAKPOINT); if (error) { td->td_md.md_ptrace_instr = 0; } else { td->td_md.md_ptrace_addr = td->td_frame->tf_pc + INSN_SIZE; } } error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next); if (error_alt == 0) { error_alt = ptrace_read_int(td, alt_next, &td->td_md.md_ptrace_instr_alt); if (error_alt) { td->td_md.md_ptrace_instr_alt = 0; } else { error_alt = ptrace_write_int(td, alt_next, PTRACE_BREAKPOINT); if (error_alt) td->td_md.md_ptrace_instr_alt = 0; else td->td_md.md_ptrace_addr_alt = alt_next; } } out: PROC_LOCK(p); return ((error != 0) && (error_alt != 0)); } int ptrace_clear_single_step(struct thread *td) { struct proc *p; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); if (td->td_md.md_ptrace_instr != 0) { p = td->td_proc; PROC_UNLOCK(p); ptrace_write_int(td, td->td_md.md_ptrace_addr, td->td_md.md_ptrace_instr); PROC_LOCK(p); td->td_md.md_ptrace_instr = 0; } if (td->td_md.md_ptrace_instr_alt != 0) { p = td->td_proc; PROC_UNLOCK(p); ptrace_write_int(td, td->td_md.md_ptrace_addr_alt, td->td_md.md_ptrace_instr_alt); PROC_LOCK(p); td->td_md.md_ptrace_instr_alt = 0; } return (0); } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_pc = addr; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; critical_exit(); cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) restore_interrupts(cspr); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } /* * Get machine context. 
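The single-step machinery above deserves a summary: ARM offers no usable trap flag here, so ptrace_single_step() plants PTRACE_BREAKPOINT at every possible successor of the current instruction — unconditionally at pc + INSN_SIZE, and additionally at the predicted target when the opcode is a branch, call, or return. A condensed sketch (both helpers are stand-ins for ptrace_write_int()/arm_predict_branch(), not real kernel functions):

	extern int plant_bkpt(uint32_t addr);	/* stand-in: save insn, write trap */
	extern int predict(uint32_t insn, uint32_t pc, uint32_t *target);

	static void
	plant_step_traps(uint32_t pc, uint32_t insn)
	{
		uint32_t target;

		plant_bkpt(pc + 4);		/* fall-through successor */
		if (predict(insn, pc, &target) == 0)
			plant_bkpt(target);	/* branch-taken successor */
	}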
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; return (0); } /* * MPSAFE */ int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int spsr; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = uc.uc_mcontext.__gregs[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); /* Restore register context. */ set_mcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
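makectx() below is the bridge to the unwinder from db_trace.c at the top of this change: the trapframe is distilled into a PCB, and db_trace_thread() seeds its struct unwind_state from pcb_regs (sf_r11/sf_sp/sf_lr/sf_pc). A simplified sketch of the flow; the real path publishes the PCB through kdb so kdb_thr_ctx() can find it:

	static struct pcb dbg_pcb;	/* illustrative storage */

	void
	debugger_entry(struct trapframe *tf, struct thread *td)
	{
		makectx(tf, &dbg_pcb);		/* capture just enough to unwind */
		db_trace_thread(td, -1);	/* backtrace from the saved PCB */
	}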
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } /* * Fake up a boot descriptor table */ vm_offset_t fake_preload_metadata(struct arm_boot_params *abp __unused) { #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i = 0; static uint32_t fake_preload[35]; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf kernel") + 1; strcpy((char*)&fake_preload[i++], "elf kernel"); i += 2; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = KERNVIRTADDR; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(uint32_t); fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR; #ifdef DDB if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; init_static_kenv(NULL, 0); return (lastaddr); } void pcpu0_init(void) { #if __ARM_ARCH >= 6 set_curthread(&thread0); #endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } #if defined(LINUX_BOOT_ABI) vm_offset_t linux_parse_boot_param(struct arm_boot_params *abp) { struct arm_lbabi_tag *walker; uint32_t revision; uint64_t serial; /* * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2 * is atags or dtb pointer. If all of these aren't satisfied, * then punt. */ if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0)) return 0; board_id = abp->abp_r1; walker = (struct arm_lbabi_tag *) (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr); /* xxx - Need to also look for binary device tree */ if (ATAG_TAG(walker) != ATAG_CORE) return 0; atag_list = walker; while (ATAG_TAG(walker) != ATAG_NONE) { switch (ATAG_TAG(walker)) { case ATAG_CORE: break; case ATAG_MEM: arm_physmem_hardware_region(walker->u.tag_mem.start, walker->u.tag_mem.size); break; case ATAG_INITRD2: break; case ATAG_SERIAL: serial = walker->u.tag_sn.low | ((uint64_t)walker->u.tag_sn.high << 32); board_set_serial(serial); break; case ATAG_REVISION: revision = walker->u.tag_rev.rev; board_set_revision(revision); break; case ATAG_CMDLINE: /* XXX open question: Parse this for boothowto? 
*/ bcopy(walker->u.tag_cmd.command, linux_command_line, ATAG_SIZE(walker)); break; default: break; } walker = ATAG_NEXT(walker); } /* Save a copy for later */ bcopy(atag_list, atags, (char *)walker - (char *)atag_list + ATAG_SIZE(walker)); init_static_kenv(NULL, 0); return fake_preload_metadata(abp); } #endif #if defined(FREEBSD_BOOT_LOADER) vm_offset_t freebsd_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr = 0; void *mdp; void *kmdp; #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif /* * Mask metadata pointer: it is supposed to be on page boundary. If * the first argument (mdp) doesn't point to a valid address the * bootloader must have passed us something else than the metadata * ptr, so we give up. Also give up if we cannot find metadta section * the loader creates that we get all this data out of. */ if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL) return 0; preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) return 0; boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); init_static_kenv(loader_envp, 0); lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif return lastaddr; } #endif vm_offset_t default_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr; #if defined(LINUX_BOOT_ABI) if ((lastaddr = linux_parse_boot_param(abp)) != 0) return lastaddr; #endif #if defined(FREEBSD_BOOT_LOADER) if ((lastaddr = freebsd_parse_boot_param(abp)) != 0) return lastaddr; #endif /* Fall back to hardcoded metadata. */ lastaddr = fake_preload_metadata(abp); return lastaddr; } /* * Stub version of the boot parameter parsing routine. We are * called early in initarm, before even VM has been initialized. * This routine needs to preserve any data that the boot loader * has passed in before the kernel starts to grow past the end * of the BSS, traditionally the place boot-loaders put this data. * * Since this is called so early, things that depend on the vm system * being setup (including access to some SoC's serial ports), about * all that can be done in this routine is to copy the arguments. * * This is the default boot parameter parsing routine. Individual * kernels/boards can override this weak function with one of their * own. We just fake metadata... 
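The weak reference just below makes this parser replaceable per board: any kernel that defines a strong parse_boot_param() takes over the whole chain. A minimal sketch, with the firmware-blob helper being hypothetical:

	extern vm_offset_t myboard_parse_fw_blob(struct arm_boot_params *abp);

	/* Hypothetical board override of the weak parse_boot_param alias. */
	vm_offset_t
	parse_boot_param(struct arm_boot_params *abp)
	{
		vm_offset_t lastaddr;

		if ((lastaddr = myboard_parse_fw_blob(abp)) != 0)
			return (lastaddr);
		return (default_parse_boot_param(abp));	/* standard fallbacks */
	}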
*/ __weak_reference(default_parse_boot_param, parse_boot_param); /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } int arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc, u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*)) { u_int addr, nregs, offset = 0; int error = 0; switch ((insn >> 24) & 0xf) { case 0x2: /* add pc, reg1, #value */ case 0x0: /* add pc, reg1, reg2, lsl #offset */ addr = fetch_reg(cookie, (insn >> 16) & 0xf); if (((insn >> 16) & 0xf) == 15) addr += 8; if (insn & 0x0200000) { offset = (insn >> 7) & 0x1e; offset = (insn & 0xff) << (32 - offset) | (insn & 0xff) >> offset; } else { offset = fetch_reg(cookie, insn & 0x0f); if ((insn & 0x0000ff0) != 0x00000000) { if (insn & 0x10) nregs = fetch_reg(cookie, (insn >> 8) & 0xf); else nregs = (insn >> 7) & 0x1f; switch ((insn >> 5) & 3) { case 0: /* lsl */ offset = offset << nregs; break; case 1: /* lsr */ offset = offset >> nregs; break; default: break; /* XXX */ } } *new_pc = addr + offset; return (0); } case 0xa: /* b ... */ case 0xb: /* bl ... */ addr = ((insn << 2) & 0x03ffffff); if (addr & 0x02000000) addr |= 0xfc000000; *new_pc = (pc + 8 + addr); return (0); case 0x7: /* ldr pc, [pc, reg, lsl #2] */ addr = fetch_reg(cookie, insn & 0xf); addr = pc + 8 + (addr << 2); error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); case 0x1: /* mov pc, reg */ *new_pc = fetch_reg(cookie, insn & 0xf); return (0); case 0x4: case 0x5: /* ldr pc, [reg] */ addr = fetch_reg(cookie, (insn >> 16) & 0xf); /* ldr pc, [reg, #offset] */ if (insn & (1 << 24)) offset = insn & 0xfff; if (insn & 0x00800000) addr += offset; else addr -= offset; error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); case 0x8: /* ldmxx reg, {..., pc} */ case 0x9: addr = fetch_reg(cookie, (insn >> 16) & 0xf); nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); nregs = (nregs + (nregs >> 4)) & 0x0f0f; nregs = (nregs + (nregs >> 8)) & 0x001f; switch ((insn >> 23) & 0x3) { case 0x0: /* ldmda */ addr = addr - 0; break; case 0x1: /* ldmia */ addr = addr + 0 + ((nregs - 1) << 2); break; case 0x2: /* ldmdb */ addr = addr - 4; break; case 0x3: /* ldmib */ addr = addr + 4 + ((nregs - 1) << 2); break; } error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); default: return (EINVAL); } } #ifdef ARM_NEW_PMAP void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #else void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #endif #ifdef EFI #define efi_next_descriptor(ptr, size) \ ((struct efi_md *)(((uint8_t *) ptr) + size)) static void add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr, int *mrcnt, uint32_t 
*memsize) { struct efi_md *map, *p; const char *type; size_t efisz, memory_size; int ndesc, i, j; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode" }; *mrcnt = 0; *memsize = 0; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = roundup2(sizeof(struct efi_map_header), 0x10); map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; if (boothowto & RB_VERBOSE) printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); memory_size = 0; for (i = 0, j = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { if (boothowto & RB_VERBOSE) { if (p->md_type <= EFI_MD_TYPE_PALCODE) type = types[p->md_type]; else type = ""; printf("%23s %012llx %12p %08llx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: continue; } j++; if (j >= FDT_MEM_REGIONS) break; mr[j].mr_start = p->md_phys; mr[j].mr_size = p->md_pages * PAGE_SIZE; memory_size += mr[j].mr_size; } *mrcnt = j; *memsize = memory_size; } #endif /* EFI */ #ifdef FDT static char * kenv_next(char *cp) { if (cp != NULL) { while (*cp != 0) cp++; cp++; if (*cp == 0) cp = NULL; } return (cp); } static void print_kenv(void) { char *cp; debugf("loader passed (static) kenv:\n"); if (loader_envp == NULL) { debugf(" no env, null ptr\n"); return; } debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp); for (cp = loader_envp; cp != NULL; cp = kenv_next(cp)) debugf(" %x %s\n", (uint32_t)cp, cp); } #ifndef ARM_NEW_PMAP void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pv_addr kernel_l1pt; struct pv_addr dpcpu; vm_offset_t dtbp, freemempos, l2_start, lastaddr; uint32_t memsize, l2size; char *env; void *kmdp; u_int l1pagetable; int i, j, err_devmap, mem_regions_sz; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; memsize = 0; cpuinfo_init(); set_cpufuncs(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); else dtbp = (vm_offset_t)NULL; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); /* Grab physical memory regions information from device tree. 
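 *
 * (Aside: the static kenv walked by print_kenv()/kenv_next() above is a
 * packed list of NUL-terminated "name=value" strings ending with an
 * empty string.  A self-contained sketch of the same walk, with a
 * made-up sample buffer:
 *
 *	static char env[] = "hint.uart.0.at=isa\0kern.vty=vt\0";
 *	char *cp;
 *
 *	for (cp = env; *cp != '\0'; cp += strlen(cp) + 1)
 *		printf("%s\n", cp);
 * )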
*/ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* Calculate number of L2 tables needed for mapping vm_page_array */ l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); l2size = (l2size >> L1_S_SHIFT) + 1; /* * Add one table for end of kernel map, one for stacks, msgbuf and * L1 and L2 tables map and one for vectors map. */ l2size += 3; /* Make it divisible by 4 */ l2size = (l2size + 3) & ~3; freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0, j = 0; i < l2size; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); j = i; } else { kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + L2_TABLE_SIZE_REAL * (i - j); kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* * Try to map as much as possible of kernel text and data using * 1MB section mapping and for the rest of initial kernel address * space use L2 coarse tables. 
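 *
 * (Worked example of the l2size arithmetic above, assuming 512 MB of
 * RAM, 4 KB pages, and -- purely for round numbers -- a hypothetical
 * 64-byte struct vm_page:
 *
 *	(512 MB / 4 KB) * 64      = 8 MB of vm_page structures
 *	(8 MB >> L1_S_SHIFT) + 1  = 9    L2 tables (1 MB L1 sections)
 *	9 + 3                     = 12   (kernel end, stacks/msgbuf, vectors)
 *	(12 + 3) & ~3             = 12   (already a multiple of 4)
 *
 * so twelve L2 tables would be reserved up front.)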
* * Link L2 tables for mapping remainder of kernel (modulo 1MB) * and kernel structures */ l2_start = lastaddr & ~(L1_S_OFFSET); for (i = 0 ; i < l2size - 1; i++) pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, &kernel_pt_table[i]); pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; /* Map kernel code and data */ pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map L1 directory and allocated L2 page tables */ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, kernel_pt_table[0].pv_pa, L2_TABLE_SIZE_REAL * l2size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map allocated DPCPU, stacks and msgbuf */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, freemempos - dpcpu.pv_va, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Link and map the vector page */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[l2size - 1]); pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(l1pagetable, NULL); vm_max_kernel_address = platform_lastaddr(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); pmap_pa = kernel_l1pt.pv_pa; setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). 
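 *
 * (For the set_stackptrs(0) call above: assuming IRQ_STACK_SIZE is one
 * page per CPU -- an assumption, the constant may differ -- CPU 0's
 * IRQ-mode r13 becomes irqstack.pv_va + 1 * PAGE_SIZE, i.e. the address
 * just past the end of its page, because STMFD pre-decrements on a
 * full-descending stack.)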
*/ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void *)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); + dbg_monitor_init(); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #else /* !ARM_NEW_PMAP */ void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; uint32_t memsize; char *env; void *kmdp; int err_devmap, mem_regions_sz; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; memsize = 0; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz, &memsize); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); } arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. 
*/ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU); /* Allocate message buffer. */ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); + dbg_monitor_init(); kdb_init(); return ((void *)STACKALIGN(thread0.td_pcb)); } #endif /* !ARM_NEW_PMAP */ #endif /* FDT */ uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *, struct timecounter *); uint32_t cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) { return (arm_cpu_fill_vdso_timehands != NULL ? 
arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0); } Index: head/sys/arm/arm/trap-v6.c =================================================================== --- head/sys/arm/arm/trap-v6.c (revision 294739) +++ head/sys/arm/arm/trap-v6.c (revision 294740) @@ -1,671 +1,671 @@ /*- * Copyright 2014 Olivier Houchard * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * Copyright 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ktrace.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #include #endif #ifdef KDTRACE_HOOKS #include #endif extern char fusubailout[]; extern char cachebailout[]; #ifdef DEBUG int last_fault_code; /* For the benefit of pmap_fault_fixup() */ #endif struct ksig { int sig; u_long code; vm_offset_t addr; }; typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int, struct thread *, struct ksig *); static abort_func_t abort_fatal; static abort_func_t abort_align; static abort_func_t abort_icache; struct abort { abort_func_t *func; const char *desc; }; /* * How are the aborts handled? * * Undefined Code: * - Always fatal as we do not know what does it mean. * Imprecise External Abort: * - Always fatal, but can be handled somehow in the future. * Now, due to PCIe buggy hardware, ignored. * Precise External Abort: * - Always fatal, but who knows in the future??? * Debug Event: * - Special handling. * External Translation Abort (L1 & L2) * - Always fatal as something is screwed up in page tables or hardware. * Domain Fault (L1 & L2): * - Always fatal as we do not play game with domains. * Alignment Fault: * - Everything should be aligned in kernel with exception of user to kernel * and vice versa data copying, so if pcb_onfault is not set, it's fatal. * We generate signal in case of abort from user mode. * Instruction cache maintenance: * - According to manual, this is translation fault during cache maintenance * operation. So, it could be really complex in SMP case and fuzzy too * for cache operations working on virtual addresses. For now, we will * consider this abort as fatal. 
In fact, no cache maintenance on * not mapped virtual addresses should be called. As cache maintenance * operation (except DMB, DSB, and Flush Prefetch Buffer) are priviledged, * the abort is fatal for user mode as well for now. (This is good place to * note that cache maintenance on virtual address fill TLB.) * Acces Bit (L1 & L2): * - Fast hardware emulation for kernel and user mode. * Translation Fault (L1 & L2): * - Standard fault mechanism is held including vm_fault(). * Permission Fault (L1 & L2): * - Fast hardware emulation of modify bits and in other cases, standard * fault mechanism is held including vm_fault(). */ static const struct abort aborts[] = { {abort_fatal, "Undefined Code (0x000)"}, {abort_align, "Alignment Fault"}, {abort_fatal, "Debug Event"}, {NULL, "Access Bit (L1)"}, {NULL, "Instruction cache maintenance"}, {NULL, "Translation Fault (L1)"}, {NULL, "Access Bit (L2)"}, {NULL, "Translation Fault (L2)"}, {abort_fatal, "External Abort"}, {abort_fatal, "Domain Fault (L1)"}, {abort_fatal, "Undefined Code (0x00A)"}, {abort_fatal, "Domain Fault (L2)"}, {abort_fatal, "External Translation Abort (L1)"}, {NULL, "Permission Fault (L1)"}, {abort_fatal, "External Translation Abort (L2)"}, {NULL, "Permission Fault (L2)"}, {abort_fatal, "TLB Conflict Abort"}, {abort_fatal, "Undefined Code (0x401)"}, {abort_fatal, "Undefined Code (0x402)"}, {abort_fatal, "Undefined Code (0x403)"}, {abort_fatal, "Undefined Code (0x404)"}, {abort_fatal, "Undefined Code (0x405)"}, {abort_fatal, "Asynchronous External Abort"}, {abort_fatal, "Undefined Code (0x407)"}, {abort_fatal, "Asynchronous Parity Error on Memory Access"}, {abort_fatal, "Parity Error on Memory Access"}, {abort_fatal, "Undefined Code (0x40A)"}, {abort_fatal, "Undefined Code (0x40B)"}, {abort_fatal, "Parity Error on Translation (L1)"}, {abort_fatal, "Undefined Code (0x40D)"}, {abort_fatal, "Parity Error on Translation (L2)"}, {abort_fatal, "Undefined Code (0x40F)"} }; static __inline void call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr) { ksiginfo_t ksi; CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d", __func__, addr, sig, code); /* * TODO: some info would be nice to know * if we are serving data or prefetch abort. */ ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = code; ksi.ksi_addr = (void *)addr; trapsignal(td, &ksi); } /* * abort_imprecise() handles the following abort: * * FAULT_EA_IMPREC - Imprecise External Abort * * The imprecise means that we don't know where the abort happened, * thus FAR is undefined. The abort should not never fire, but hot * plugging or accidental hardware failure can be the cause of it. * If the abort happens, it can even be on different (thread) context. * Without any additional support, the abort is fatal, as we do not * know what really happened. * * QQQ: Some additional functionality, like pcb_onfault but global, * can be implemented. Imprecise handlers could be registered * which tell us if the abort is caused by something they know * about. They should return one of three codes like: * FAULT_IS_MINE, * FAULT_CAN_BE_MINE, * FAULT_IS_NOT_MINE. * The handlers should be called until some of them returns * FAULT_IS_MINE value or all was called. If all handlers return * FAULT_IS_NOT_MINE value, then the abort is fatal. */ static __inline void abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode) { /* * XXX - We can got imprecise abort as result of access * to not-present PCI/PCIe configuration space. 
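	 *
	 * (For orientation: the 32-entry aborts[] table above is indexed
	 * by FSR_TO_FAULT() in abort_handler() below.  A plausible
	 * reconstruction of that computation -- the real macro lives
	 * elsewhere in the tree, so treat the bit positions as an
	 * assumption -- the short-descriptor DFSR/IFSR keep FS[3:0] in
	 * bits [3:0] and FS[4] in bit 10, so the 0x400-series codes land
	 * at indices 16-31:
	 *
	 *	#define EXAMPLE_FSR_TO_FAULT(fsr) \
	 *	    (((fsr) & 0xf) | (((fsr) >> 6) & 0x10))
	 * )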
*/ #if 0 goto out; #endif abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL); /* * Returning from this function means that we ignore * the abort for good reason. Note that imprecise abort * could fire any time even in user mode. */ #if 0 out: if (usermode) userret(curthread, tf); #endif } /* * abort_debug() handles the following abort: * * FAULT_DEBUG - Debug Event * */ static __inline void abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode, u_int far) { if (usermode) { struct thread *td; td = curthread; call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far); userret(td, tf); } else { #ifdef KDB - kdb_trap(T_BREAKPOINT, 0, tf); + kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf); #else printf("No debugger in kernel.\n"); #endif } } /* * Abort handler. * * FAR, FSR, and everything what can be lost after enabling * interrupts must be grabbed before the interrupts will be * enabled. Note that when interrupts will be enabled, we * could even migrate to another CPU ... * * TODO: move quick cases to ASM */ void abort_handler(struct trapframe *tf, int prefetch) { struct thread *td; vm_offset_t far, va; int idx, rv; uint32_t fsr; struct ksig ksig; struct proc *p; struct pcb *pcb; struct vm_map *map; struct vmspace *vm; vm_prot_t ftype; bool usermode; #ifdef INVARIANTS void *onfault; #endif td = curthread; fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get(); #if __ARM_ARCH >= 7 far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get(); #else far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get(); #endif idx = FSR_TO_FAULT(fsr); usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */ if (usermode) td->td_frame = tf; CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d", __func__, fsr, idx, far, prefetch, usermode); /* * Firstly, handle aborts that are not directly related to mapping. */ if (__predict_false(idx == FAULT_EA_IMPREC)) { abort_imprecise(tf, fsr, prefetch, usermode); return; } if (__predict_false(idx == FAULT_DEBUG)) { abort_debug(tf, fsr, prefetch, usermode, far); return; } /* * ARM has a set of unprivileged load and store instructions * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in other * than user mode and OS should recognize their aborts and behave * appropriately. However, there is no way how to do that reasonably * in general unless we restrict the handling somehow. * * For now, these instructions are used only in copyin()/copyout() * like functions where usermode buffers are checked in advance that * they are not from KVA space. Thus, no action is needed here. */ #ifdef ARM_NEW_PMAP rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode); if (rv == 0) { return; } else if (rv == EFAULT) { call_trapsignal(td, SIGSEGV, SEGV_MAPERR, far); userret(td, tf); return; } #endif /* * Now, when we handled imprecise and debug aborts, the rest of * aborts should be really related to mapping. */ PCPU_INC(cnt.v_trap); #ifdef KDB if (kdb_active) { kdb_reenter(); goto out; } #endif if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) { /* * Due to both processor errata and lazy TLB invalidation when * access restrictions are removed from virtual pages, memory * accesses that are allowed by the physical mapping layer may * nonetheless cause one spurious page fault per virtual page. 
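		 *
		 * (A minimal sketch of entering such a "no faulting"
		 * section via the KPI named just below -- e.g. what
		 * uiomove_nofault() does internally:
		 *
		 *	int save = vm_fault_disable_pagefaults();
		 *	... access that must not sleep in vm_fault() ...
		 *	vm_fault_enable_pagefaults(save);
		 * )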
* When the thread is executing a "no faulting" section that * is bracketed by vm_fault_{disable,enable}_pagefaults(), * every page fault is treated as a spurious page fault, * unless it accesses the same virtual address as the most * recent page fault within the same "no faulting" section. */ if (td->td_md.md_spurflt_addr != far || (td->td_pflags & TDP_RESETSPUR) != 0) { td->td_md.md_spurflt_addr = far; td->td_pflags &= ~TDP_RESETSPUR; tlb_flush_local(far & ~PAGE_MASK); return; } } else { /* * If we get a page fault while in a critical section, then * it is most likely a fatal kernel page fault. The kernel * is already going to panic trying to get a sleep lock to * do the VM lookup, so just consider it a fatal trap so the * kernel can print out a useful trap message and even get * to the debugger. * * If we get a page fault while holding a non-sleepable * lock, then it is most likely a fatal kernel page fault. * If WITNESS is enabled, then it's going to whine about * bogus LORs with various VM locks, so just skip to the * fatal trap handling directly. */ if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, "Kernel page fault") != 0) { abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig); return; } } /* Re-enable interrupts if they were enabled previously. */ if (td->td_md.md_spinlock_count == 0) { if (__predict_true(tf->tf_spsr & PSR_I) == 0) enable_interrupts(PSR_I); if (__predict_true(tf->tf_spsr & PSR_F) == 0) enable_interrupts(PSR_F); } p = td->td_proc; if (usermode) { td->td_pticks = 0; if (td->td_cowgen != p->p_cowgen) thread_cow_update(td); } /* Invoke the appropriate handler, if necessary. */ if (__predict_false(aborts[idx].func != NULL)) { if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig)) goto do_trapsignal; goto out; } /* * Don't pass faulting cache operation to vm_fault(). We don't want * to handle all vm stuff at this moment. */ pcb = td->td_pcb; if (__predict_false(pcb->pcb_onfault == cachebailout)) { tf->tf_r0 = far; /* return failing address */ tf->tf_pc = (register_t)pcb->pcb_onfault; return; } /* Handle remaining I-cache aborts. */ if (idx == FAULT_ICACHE) { if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig)) goto do_trapsignal; goto out; } /* * At this point, we're dealing with one of the following aborts: * * FAULT_TRAN_xx - Translation * FAULT_PERM_xx - Permission * * These are the main virtual memory-related faults signalled by * the MMU. */ /* fusubailout is used by [fs]uswintr to avoid page faulting. */ pcb = td->td_pcb; if (__predict_false(pcb->pcb_onfault == fusubailout)) { tf->tf_r0 = EFAULT; tf->tf_pc = (register_t)pcb->pcb_onfault; return; } va = trunc_page(far); if (va >= KERNBASE) { /* * Don't allow user-mode faults in kernel address space. */ if (usermode) goto nogo; map = kernel_map; } else { /* * This is a fault on non-kernel virtual memory. If curproc * is NULL or curproc->p_vmspace is NULL the fault is fatal. */ vm = (p != NULL) ? p->p_vmspace : NULL; if (vm == NULL) goto nogo; map = &vm->vm_map; if (!usermode && (td->td_intr_nesting_level != 0 || pcb->pcb_onfault == NULL)) { abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig); return; } } ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ; if (prefetch) ftype |= VM_PROT_EXECUTE; #ifdef DEBUG last_fault_code = fsr; #endif #ifndef ARM_NEW_PMAP if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, usermode)) { goto out; } #endif #ifdef INVARIANTS onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; #endif /* Fault in the page. 
*/ rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); #ifdef INVARIANTS pcb->pcb_onfault = onfault; #endif if (__predict_true(rv == KERN_SUCCESS)) goto out; nogo: if (!usermode) { if (td->td_intr_nesting_level == 0 && pcb->pcb_onfault != NULL) { tf->tf_r0 = rv; tf->tf_pc = (int)pcb->pcb_onfault; return; } CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv); abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig); return; } ksig.sig = SIGSEGV; ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR; ksig.addr = far; do_trapsignal: call_trapsignal(td, ksig.sig, ksig.code, ksig.addr); out: if (usermode) userret(td, tf); } /* * abort_fatal() handles the following data aborts: * * FAULT_DEBUG - Debug Event * FAULT_ACCESS_xx - Acces Bit * FAULT_EA_PREC - Precise External Abort * FAULT_DOMAIN_xx - Domain Fault * FAULT_EA_TRAN_xx - External Translation Abort * FAULT_EA_IMPREC - Imprecise External Abort * + all undefined codes for ABORT * * We should never see these on a properly functioning system. * * This function is also called by the other handlers if they * detect a fatal problem. * * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. */ static int abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch, struct thread *td, struct ksig *ksig) { bool usermode; const char *mode; const char *rw_mode; usermode = TRAPF_USERMODE(tf); #ifdef KDTRACE_HOOKS if (!usermode) { if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far)) return (0); } #endif mode = usermode ? "user" : "kernel"; rw_mode = fsr & FSR_WNR ? "write" : "read"; disable_interrupts(PSR_I|PSR_F); if (td != NULL) { printf("Fatal %s mode data abort: '%s' on %s\n", mode, aborts[idx].desc, rw_mode); printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); if (idx != FAULT_EA_IMPREC) printf("%08x, ", far); else printf("Invalid, "); printf("spsr=%08x\n", tf->tf_spsr); } else { printf("Fatal %s mode prefetch abort at 0x%08x\n", mode, tf->tf_pc); printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); } printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); printf("r12=%08x, ", tf->tf_r12); if (usermode) printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr); else printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr); printf(", pc =%08x\n\n", tf->tf_pc); #ifdef KDB if (debugger_on_panic || kdb_active) kdb_trap(fsr, 0, tf); #endif panic("Fatal abort"); /*NOTREACHED*/ } /* * abort_align() handles the following data abort: * * FAULT_ALIGN - Alignment fault * * Everything should be aligned in kernel with exception of user to kernel * and vice versa data copying, so if pcb_onfault is not set, it's fatal. * We generate signal in case of abort from user mode. 
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		/* Test td for NULL before dereferencing it. */
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}

/*
 * abort_icache() handles the following data abort:
 *
 *  FAULT_ICACHE - Instruction cache maintenance
 *
 * According to the manual, FAULT_ICACHE is a translation fault during a
 * cache maintenance operation.  In fact, no cache maintenance operation
 * should be called on unmapped virtual addresses.  As cache maintenance
 * operations (except DMB, DSB, and Flush Prefetch Buffer) are
 * privileged, the abort is considered fatal for now.  However, cache
 * maintenance operations on virtual addresses can be really complex and
 * fuzzy in the SMP case, so maybe in the future the standard fault
 * mechanism, including a vm_fault() call, should be applied here.
 */
static int
abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{

	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	return (0);
}
Index: head/sys/arm/include/cpu-v6.h
===================================================================
--- head/sys/arm/include/cpu-v6.h	(revision 294739)
+++ head/sys/arm/include/cpu-v6.h	(revision 294740)
@@ -1,612 +1,624 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here, they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#else

#include
#include "machine/atomic.h"
#include "machine/cpufunc.h"
#include "machine/cpuinfo.h"
#include "machine/sysreg.h"

#define CPU_ASID_KERNEL 0

vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...)
#s #define _RF0(fname, aname...) \ static __inline register_t \ fname(void) \ { \ register_t reg; \ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _R64F0(fname, aname) \ static __inline uint64_t \ fname(void) \ { \ uint64_t reg; \ __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _WF0(fname, aname...) \ static __inline void \ fname(void) \ { \ __asm __volatile("mcr\t" _FX(aname)); \ } #define _WF1(fname, aname...) \ static __inline void \ fname(register_t reg) \ { \ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ } #define _W64F1(fname, aname...) \ static __inline void \ fname(uint64_t reg) \ { \ __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \ } /* * Raw CP15 maintenance operations * !!! not for external use !!! */ /* TLB */ _WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */ #endif _WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */ #if __ARM_ARCH >= 7 && defined SMP _WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */ #endif _WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */ #if __ARM_ARCH >= 7 && defined SMP _WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */ #endif _WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */ _WF1(_CP15_TTB_SET, CP15_TTBR0(%0)) /* Cache and Branch predictor */ _WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */ #endif _WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */ _WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */ _WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */ _WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */ #if __ARM_ARCH >= 7 _WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */ #endif _WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */ _WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */ _WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */ _WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */ #if __ARM_ARCH >= 7 && defined SMP _WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */ #endif _WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate */ /* * Publicly accessible functions */ +/* CP14 Debug Registers */ +_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0)) +_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0)) +_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0)) +_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0)) +_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0)) + +_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0)) +_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0)) +_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0)) +_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0)) + /* Various control registers */ _RF0(cp15_cpacr_get, CP15_CPACR(%0)) _WF1(cp15_cpacr_set, CP15_CPACR(%0)) _RF0(cp15_dfsr_get, CP15_DFSR(%0)) _RF0(cp15_ifsr_get, CP15_IFSR(%0)) _WF1(cp15_prrr_set, CP15_PRRR(%0)) _WF1(cp15_nmrr_set, CP15_NMRR(%0)) _RF0(cp15_ttbr_get, CP15_TTBR0(%0)) _RF0(cp15_dfar_get, CP15_DFAR(%0)) #if __ARM_ARCH >= 7 _RF0(cp15_ifar_get, CP15_IFAR(%0)) _RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0)) 
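/*
 * For readers unfamiliar with the generator macros: each _RF0() above
 * expands to roughly the following, shown here for CP15_MIDR.  The
 * function name is ours, chosen to avoid clashing with the real
 * cp15_midr_get accessor instantiated below.
 */
static __inline register_t
example_cp15_midr_get(void)
{
	register_t reg;

	/* "mrc\t" _FX(p15, 0, %0, c0, c0, 0) pastes into this string. */
	__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
	return (reg);
}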
#endif /* ARMv6+ and XScale */ _RF0(cp15_actlr_get, CP15_ACTLR(%0)) _WF1(cp15_actlr_set, CP15_ACTLR(%0)) #if __ARM_ARCH >= 6 _WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0)) _WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0)) _RF0(cp15_par_get, CP15_PAR(%0)) _RF0(cp15_sctlr_get, CP15_SCTLR(%0)) #endif /*CPU id registers */ _RF0(cp15_midr_get, CP15_MIDR(%0)) _RF0(cp15_ctr_get, CP15_CTR(%0)) _RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) _RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) _RF0(cp15_mpidr_get, CP15_MPIDR(%0)) _RF0(cp15_revidr_get, CP15_REVIDR(%0)) _RF0(cp15_ccsidr_get, CP15_CCSIDR(%0)) _RF0(cp15_clidr_get, CP15_CLIDR(%0)) _RF0(cp15_aidr_get, CP15_AIDR(%0)) _WF1(cp15_csselr_set, CP15_CSSELR(%0)) _RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0)) _RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0)) _RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0)) _RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0)) _RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0)) _RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0)) _RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0)) _RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0)) _RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0)) _RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0)) _RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0)) _RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0)) _RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0)) _RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0)) _RF0(cp15_cbar_get, CP15_CBAR(%0)) /* Performance Monitor registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) #elif __ARM_ARCH > 6 _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0)) _RF0(cp15_pmovsr_get, CP15_PMOVSR(%0)) _WF1(cp15_pmovsr_set, CP15_PMOVSR(%0)) _WF1(cp15_pmswinc_set, CP15_PMSWINC(%0)) _RF0(cp15_pmselr_get, CP15_PMSELR(%0)) _WF1(cp15_pmselr_set, CP15_PMSELR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) _RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0)) _WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0)) _RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0)) _WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0)) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pminten_get, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_set, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0)) #endif _RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0)) _WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0)) _RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0)) _WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0)) _RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0)) _WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0)) /* Generic Timer registers - only use when you know the hardware is available */ _RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0)) _WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0)) _RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0)) _WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0)) _RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0)) _WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0)) _RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0)) _WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0)) _RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0)) _WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0)) _RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0)) _WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0)) _RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0)) _WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0)) _RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0)) _WF1(cp15_cnthp_tval_set, 
CP15_CNTHP_TVAL(%0)) _RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0)) _WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0)) _R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0)) _R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0)) _R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0)) _W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0)) _R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0)) _W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0)) _R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0)) _W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0)) _R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0)) _W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0)) #undef _FX #undef _RF0 #undef _WF0 #undef _WF1 #if __ARM_ARCH >= 6 /* * Cache and TLB maintenance operations for armv6+ code. The #else block * provides armv4/v5 implementations for a few of these used in common code. */ /* * TLB maintenance operations. */ /* Local (i.e. not broadcasting ) operations. */ /* Flush all TLB entries (even global). */ static __inline void tlb_flush_all_local(void) { dsb(); _CP15_TLBIALL(); dsb(); } /* Flush all not global TLB entries. */ static __inline void tlb_flush_all_ng_local(void) { dsb(); _CP15_TLBIASID(CPU_ASID_KERNEL); dsb(); } /* Flush single TLB entry (even global). */ static __inline void tlb_flush_local(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Flush range of TLB entries (even global). */ static __inline void tlb_flush_range_local(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Broadcasting operations. */ #if __ARM_ARCH >= 7 && defined SMP static __inline void tlb_flush_all(void) { dsb(); _CP15_TLBIALLIS(); dsb(); } static __inline void tlb_flush_all_ng(void) { dsb(); _CP15_TLBIASIDIS(CPU_ASID_KERNEL); dsb(); } static __inline void tlb_flush(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVAAIS(va); dsb(); } static __inline void tlb_flush_range(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVAAIS(va); dsb(); } #else /* SMP */ #define tlb_flush_all() tlb_flush_all_local() #define tlb_flush_all_ng() tlb_flush_all_ng_local() #define tlb_flush(va) tlb_flush_local(va) #define tlb_flush_range(va, size) tlb_flush_range_local(va, size) #endif /* SMP */ /* * Cache maintenance operations. 
*/ /* Sync I and D caches to PoU */ static __inline void icache_sync(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate I cache */ static __inline void icache_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate branch predictor buffer */ static __inline void bpb_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_BPIALLIS(); #else _CP15_BPIALL(); #endif dsb(); isb(); } /* Write back D-cache to PoU */ static __inline void dcache_wb_pou(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); } /* * Invalidate D-cache to PoC * * Caches are invalidated from outermost to innermost as fresh cachelines * flow in this direction. In given range, if there was no dirty cacheline * in any cache before, no stale cacheline should remain in them after this * operation finishes. */ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); /* invalidate L2 first */ cpu_l2cache_inv_range(pa, size); /* then L1 */ va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* * Discard D-cache lines to PoC, prior to overwrite by DMA engine. * * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't * flow into L1 while invalidating. This routine is intended to be used only * when invalidating a buffer before a DMA operation loads new data into memory. * The concern in this case is that dirty lines are not evicted to main memory, * overwriting the DMA data. For that reason, the L1 is done first to ensure * that an evicted L1 line doesn't flow to L2 after the L2 has been cleaned. */ static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; /* invalidate L1 first */ dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); /* then L2 */ cpu_l2cache_inv_range(pa, size); } /* * Write back D-cache to PoC * * Caches are written back from innermost to outermost as dirty cachelines * flow in this direction. In given range, no dirty cacheline should remain * in any cache after this operation finishes. 
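 *
 * (A hedged driver-side sketch of how the PoC helpers pair with DMA;
 * the dma_start_* calls are hypothetical placeholders:
 *
 *	dcache_wb_poc(va, pa, len);		CPU wrote, device will read
 *	dma_start_device_read(pa, len);
 *
 *	dcache_inv_poc_dma(va, pa, len);	device will write buffer,
 *	dma_start_device_write(pa, len);	CPU reads it afterwards
 * )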
*/ static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); cpu_l2cache_wb_range(pa, size); } /* Write back and invalidate D-cache to PoC */ static __inline void dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) { vm_offset_t va; vm_offset_t eva = sva + size; dsb(); /* write back L1 first */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); /* then write back and invalidate L2 */ cpu_l2cache_wbinv_range(pa, size); /* then invalidate L1 */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* Set TTB0 register */ static __inline void cp15_ttbr_set(uint32_t reg) { dsb(); _CP15_TTB_SET(reg); dsb(); _CP15_BPIALL(); dsb(); isb(); tlb_flush_all_ng_local(); } #else /* ! __ARM_ARCH >= 6 */ /* * armv4/5 compatibility shims. * * These functions provide armv4 cache maintenance using the new armv6 names. * Included here are just the functions actually used now in common code; it may * be necessary to add things here over time. * * The callers of the dcache functions expect these routines to handle address * and size values which are not aligned to cacheline boundaries; the armv4 and * armv5 asm code handles that. */ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_inv_range(va, size); cpu_l2cache_inv_range(va, size); } static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { /* See armv6 code, above, for why we do L2 before L1 in this case. */ cpu_l2cache_inv_range(va, size); cpu_dcache_inv_range(va, size); } static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_wb_range(va, size); cpu_l2cache_wb_range(va, size); } #endif /* __ARM_ARCH >= 6 */ #endif /* _KERNEL */ #endif /* !MACHINE_CPU_V6_H */ Index: head/sys/arm/include/db_machdep.h =================================================================== --- head/sys/arm/include/db_machdep.h (revision 294739) +++ head/sys/arm/include/db_machdep.h (revision 294740) @@ -1,98 +1,105 @@ /*- * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
* * from: FreeBSD: src/sys/i386/include/db_machdep.h,v 1.16 1999/10/04 * $FreeBSD$ */ #ifndef _MACHINE_DB_MACHDEP_H_ #define _MACHINE_DB_MACHDEP_H_ #include #include #include +#include #define T_BREAKPOINT (1) +#define T_WATCHPOINT (2) typedef vm_offset_t db_addr_t; typedef int db_expr_t; #define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_regs.sf_pc) #define BKPT_INST (KERNEL_BREAKPOINT) #define BKPT_SIZE (INSN_SIZE) #define BKPT_SET(inst) (BKPT_INST) #define BKPT_SKIP do { \ kdb_frame->tf_pc += BKPT_SIZE; \ } while (0) -#define SOFTWARE_SSTEP 1 +#if __ARM_ARCH >= 6 +#define db_clear_single_step kdb_cpu_clear_singlestep +#define db_set_single_step kdb_cpu_set_singlestep +#define db_pc_is_singlestep kdb_cpu_pc_is_singlestep +#else +#define SOFTWARE_SSTEP 1 +#endif #define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT) -#define IS_WATCHPOINT_TRAP(type, code) (0) - +#define IS_WATCHPOINT_TRAP(type, code) (type == T_WATCHPOINT) #define inst_trap_return(ins) (0) /* ldmxx reg, {..., pc} 01800000 stack mode 000f0000 register 0000ffff register list */ /* mov pc, reg 0000000f register */ #define inst_return(ins) (((ins) & 0x0e108000) == 0x08108000 || \ ((ins) & 0x0ff0fff0) == 0x01a0f000 || \ ((ins) & 0x0ffffff0) == 0x012fff10) /* bx */ /* bl ... 00ffffff offset>>2 */ #define inst_call(ins) (((ins) & 0x0f000000) == 0x0b000000) /* b ... 00ffffff offset>>2 */ /* ldr pc, [pc, reg, lsl #2] 0000000f register */ #define inst_branch(ins) (((ins) & 0x0f000000) == 0x0a000000 || \ ((ins) & 0x0fdffff0) == 0x079ff100 || \ ((ins) & 0x0cd0f000) == 0x0490f000 || \ ((ins) & 0x0ffffff0) == 0x012fff30 || /* blx */ \ ((ins) & 0x0de0f000) == 0x0080f000) #define inst_load(ins) (0) #define inst_store(ins) (0) #define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + INSN_SIZE)) #define DB_SMALL_VALUE_MAX (0x7fffffff) #define DB_SMALL_VALUE_MIN (-0x40001) #define DB_ELFSIZE 32 int db_validate_address(vm_offset_t); u_int branch_taken (u_int insn, db_addr_t pc); #ifdef __ARMEB__ #define BYTE_MSF (1) #endif #endif /* !_MACHINE_DB_MACHDEP_H_ */ Index: head/sys/arm/include/debug_monitor.h =================================================================== --- head/sys/arm/include/debug_monitor.h (nonexistent) +++ head/sys/arm/include/debug_monitor.h (revision 294740) @@ -0,0 +1,80 @@ +/*- + * Copyright (c) 2014 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Semihalf under + * the sponsorship of the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
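 *
 * (A worked check of the inst_call() predicate in db_machdep.h above:
 * 0xEB000000 encodes a condition-AL "bl" with a zero offset field,
 * i.e. a branch-and-link to pc+8, so the call pattern matches:
 *
 *	uint32_t ins = 0xEB000000;
 *	assert((ins & 0x0f000000) == 0x0b000000);	inst_call() hit
 *	assert((ins & 0x0e108000) != 0x08108000);	not an ldm return
 * )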
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_DEBUG_MONITOR_H_ +#define _MACHINE_DEBUG_MONITOR_H_ + +#ifdef DDB + +#include + +enum dbg_access_t { + HW_BREAKPOINT_X = 0, + HW_WATCHPOINT_R = 1, + HW_WATCHPOINT_W = 2, + HW_WATCHPOINT_RW = HW_WATCHPOINT_R | HW_WATCHPOINT_W, +}; + +#if __ARM_ARCH >= 6 +void dbg_monitor_init(void); +void dbg_show_watchpoint(void); +int dbg_setup_watchpoint(db_expr_t, db_expr_t, enum dbg_access_t); +int dbg_remove_watchpoint(db_expr_t, db_expr_t); +#else /* __ARM_ARCH >= 6 */ +static __inline void +dbg_show_watchpoint(void) +{ +} +static __inline int +dbg_setup_watchpoint(db_expr_t addr __unused, db_expr_t size __unused, + enum dbg_access_t access __unused) +{ + return (ENXIO); +} +static __inline int +dbg_remove_watchpoint(db_expr_t addr __unused, db_expr_t size __unused) +{ + return (ENXIO); +} +static __inline void +dbg_monitor_init(void) +{ +} +#endif /* __ARM_ARCH < 6 */ + +#else /* DDB */ +static __inline void +dbg_monitor_init(void) +{ +} +#endif + +#endif /* _MACHINE_DEBUG_MONITOR_H_ */ Property changes on: head/sys/arm/include/debug_monitor.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/arm/include/kdb.h =================================================================== --- head/sys/arm/include/kdb.h (revision 294739) +++ head/sys/arm/include/kdb.h (revision 294740) @@ -1,60 +1,67 @@ /*- * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
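 *
 * (Usage sketch for the debug_monitor.h interface above, as wired into
 * ddb's watchpoint commands: on ARMv6+ dbg_setup_watchpoint()
 * presumably programs a watchpoint value/control register pair, while
 * the pre-v6 inline stubs just return ENXIO:
 *
 *	if (dbg_setup_watchpoint(addr, size, HW_WATCHPOINT_RW) != 0)
 *		db_printf("watchpoint setup failed\n");
 * )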
* * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include #include +#include #define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] +#if __ARM_ARCH >= 6 +extern void kdb_cpu_clear_singlestep(void); +extern void kdb_cpu_set_singlestep(void); +boolean_t kdb_cpu_pc_is_singlestep(db_addr_t); +#else static __inline void kdb_cpu_clear_singlestep(void) { } static __inline void kdb_cpu_set_singlestep(void) { } +#endif static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { cpu_icache_sync_range((vm_offset_t)addr, size); } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ Index: head/sys/arm/include/sysreg.h =================================================================== --- head/sys/arm/include/sysreg.h (revision 294739) +++ head/sys/arm/include/sysreg.h (revision 294740) @@ -1,289 +1,307 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Macros to make working with the System Control Registers simpler. * * Note that when register r0 is hard-coded in these definitions it means the * cp15 operation neither reads nor writes the register, and r0 is used only * because some syntactically-valid register name has to appear at that point to * keep the asm parser happy. */
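The register tuples defined below are meant to be pasted into mrc/mcr instructions by stringizing them, which is why each macro takes the destination register as the rr argument. A minimal sketch of that pattern (the _FX and read_cp_reg helper names are assumptions for illustration, along the lines of the accessors used elsewhere in the tree):

#include <stdint.h>

#define _FX(s...)	#s

/* Read a 32-bit coprocessor register named by one of the tuples below. */
#define read_cp_reg(spec)						\
({									\
	uint32_t _val;							\
	__asm __volatile("mrc " _FX(spec) : "=r" (_val));		\
	_val;								\
})

/*
 * CP15_MIDR(%0) expands to p15, 0, %0, c0, c0, 0, so
 * read_cp_reg(CP15_MIDR(%0)) emits "mrc p15, 0, %0, c0, c0, 0"
 * and yields the Main ID Register.
 */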
#ifndef MACHINE_SYSREG_H #define MACHINE_SYSREG_H #include /* + * CP14 registers + */ +#if __ARM_ARCH >= 6 + +#define CP14_DBGDIDR(rr) p14, 0, rr, c0, c0, 0 /* Debug ID Register */ +#define CP14_DBGDSCRext_V6(rr) p14, 0, rr, c0, c1, 0 /* Debug Status and Ctrl Register v6 */ +#define CP14_DBGDSCRext_V7(rr) p14, 0, rr, c0, c2, 2 /* Debug Status and Ctrl Register v7 */ +#define CP14_DBGVCR(rr) p14, 0, rr, c0, c7, 0 /* Vector Catch Register */ +#define CP14_DBGOSLAR(rr) p14, 0, rr, c1, c0, 4 /* OS Lock Access Register */ +#define CP14_DBGOSLSR(rr) p14, 0, rr, c1, c1, 4 /* OS Lock Status Register */ +#define CP14_DBGOSDLR(rr) p14, 0, rr, c1, c3, 4 /* OS Double Lock Register */ +#define CP14_DBGPRSR(rr) p14, 0, rr, c1, c5, 4 /* Device Powerdown and Reset Status */ + +#define CP14_DBGDSCRint(rr) CP14_DBGDSCRext_V6(rr) /* Debug Status and Ctrl internal view */ + +#endif + +/* * CP15 C0 registers */ #define CP15_MIDR(rr) p15, 0, rr, c0, c0, 0 /* Main ID Register */ #define CP15_CTR(rr) p15, 0, rr, c0, c0, 1 /* Cache Type Register */ #define CP15_TCMTR(rr) p15, 0, rr, c0, c0, 2 /* TCM Type Register */ #define CP15_TLBTR(rr) p15, 0, rr, c0, c0, 3 /* TLB Type Register */ #define CP15_MPIDR(rr) p15, 0, rr, c0, c0, 5 /* Multiprocessor Affinity Register */ #define CP15_REVIDR(rr) p15, 0, rr, c0, c0, 6 /* Revision ID Register */ #define CP15_ID_PFR0(rr) p15, 0, rr, c0, c1, 0 /* Processor Feature Register 0 */ #define CP15_ID_PFR1(rr) p15, 0, rr, c0, c1, 1 /* Processor Feature Register 1 */ #define CP15_ID_DFR0(rr) p15, 0, rr, c0, c1, 2 /* Debug Feature Register 0 */ #define CP15_ID_AFR0(rr) p15, 0, rr, c0, c1, 3 /* Auxiliary Feature Register 0 */ #define CP15_ID_MMFR0(rr) p15, 0, rr, c0, c1, 4 /* Memory Model Feature Register 0 */ #define CP15_ID_MMFR1(rr) p15, 0, rr, c0, c1, 5 /* Memory Model Feature Register 1 */ #define CP15_ID_MMFR2(rr) p15, 0, rr, c0, c1, 6 /* Memory Model Feature Register 2 */ #define CP15_ID_MMFR3(rr) p15, 0, rr, c0, c1, 7 /* Memory Model Feature Register 3 */ #define CP15_ID_ISAR0(rr) p15, 0, rr, c0, c2, 0 /* Instruction Set Attribute Register 0 */ #define CP15_ID_ISAR1(rr) p15, 0, rr, c0, c2, 1 /* Instruction Set Attribute Register 1 */ #define CP15_ID_ISAR2(rr) p15, 0, rr, c0, c2, 2 /* Instruction Set Attribute Register 2 */ #define CP15_ID_ISAR3(rr) p15, 0, rr, c0, c2, 3 /* Instruction Set Attribute Register 3 */ #define CP15_ID_ISAR4(rr) p15, 0, rr, c0, c2, 4 /* Instruction Set Attribute Register 4 */ #define CP15_ID_ISAR5(rr) p15, 0, rr, c0, c2, 5 /* Instruction Set Attribute Register 5 */ #define CP15_CCSIDR(rr) p15, 1, rr, c0, c0, 0 /* Cache Size ID Registers */ #define CP15_CLIDR(rr) p15, 1, rr, c0, c0, 1 /* Cache Level ID Register */ #define CP15_AIDR(rr) p15, 1, rr, c0, c0, 7 /* Auxiliary ID Register */ #define CP15_CSSELR(rr) p15, 2, rr, c0, c0, 0 /* Cache Size Selection Register */ /* * CP15 C1 registers */ #define CP15_SCTLR(rr) p15, 0, rr, c1, c0, 0 /* System Control Register */ #define CP15_ACTLR(rr) p15, 0, rr, c1, c0, 1 /* IMPLEMENTATION DEFINED Auxiliary Control Register */ #define CP15_CPACR(rr) p15, 0, rr, c1, c0, 2 /* Coprocessor Access Control Register */ #define CP15_SCR(rr) p15, 0, rr, c1, c1, 0 /* Secure Configuration Register */ #define CP15_SDER(rr) p15, 0, rr, c1, c1, 1 /* Secure Debug Enable Register */ #define CP15_NSACR(rr) p15, 0, rr, c1, c1, 2 /* Non-Secure Access Control Register */ /* * CP15 C2 registers */ #define CP15_TTBR0(rr) p15, 0, rr, c2, c0, 0 /* Translation Table Base Register 0 */ #define CP15_TTBR1(rr) p15, 0, rr, c2, c0, 1 /*
Translation Table Base Register 1 */ #define CP15_TTBCR(rr) p15, 0, rr, c2, c0, 2 /* Translation Table Base Control Register */ /* * CP15 C3 registers */ #define CP15_DACR(rr) p15, 0, rr, c3, c0, 0 /* Domain Access Control Register */ /* * CP15 C5 registers */ #define CP15_DFSR(rr) p15, 0, rr, c5, c0, 0 /* Data Fault Status Register */ #if __ARM_ARCH >= 6 /* From ARMv6: */ #define CP15_IFSR(rr) p15, 0, rr, c5, c0, 1 /* Instruction Fault Status Register */ #endif #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_ADFSR(rr) p15, 0, rr, c5, c1, 0 /* Auxiliary Data Fault Status Register */ #define CP15_AIFSR(rr) p15, 0, rr, c5, c1, 1 /* Auxiliary Instruction Fault Status Register */ #endif /* * CP15 C6 registers */ #define CP15_DFAR(rr) p15, 0, rr, c6, c0, 0 /* Data Fault Address Register */ #if __ARM_ARCH >= 6 /* From ARMv6k: */ #define CP15_IFAR(rr) p15, 0, rr, c6, c0, 2 /* Instruction Fault Address Register */ #endif /* * CP15 C7 registers */ #if __ARM_ARCH >= 7 && defined(SMP) /* From ARMv7: */ #define CP15_ICIALLUIS p15, 0, r0, c7, c1, 0 /* Instruction cache invalidate all PoU, IS */ #define CP15_BPIALLIS p15, 0, r0, c7, c1, 6 /* Branch predictor invalidate all IS */ #endif #define CP15_PAR(rr) p15, 0, rr, c7, c4, 0 /* Physical Address Register */ #define CP15_ICIALLU p15, 0, r0, c7, c5, 0 /* Instruction cache invalidate all PoU */ #define CP15_ICIMVAU(rr) p15, 0, rr, c7, c5, 1 /* Instruction cache invalidate */ #if __ARM_ARCH == 6 /* Deprecated in ARMv7 */ #define CP15_CP15ISB p15, 0, r0, c7, c5, 4 /* ISB */ #endif #define CP15_BPIALL p15, 0, r0, c7, c5, 6 /* Branch predictor invalidate all */ #define CP15_BPIMVA(rr) p15, 0, rr, c7, c5, 7 /* Branch predictor invalidate by MVA */ #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCIALL p15, 0, r0, c7, c6, 0 /* Data cache invalidate all */ #endif #define CP15_DCIMVAC(rr) p15, 0, rr, c7, c6, 1 /* Data cache invalidate by MVA PoC */ #define CP15_DCISW(rr) p15, 0, rr, c7, c6, 2 /* Data cache invalidate by set/way */ #define CP15_ATS1CPR(rr) p15, 0, rr, c7, c8, 0 /* Stage 1 Current state PL1 read */ #define CP15_ATS1CPW(rr) p15, 0, rr, c7, c8, 1 /* Stage 1 Current state PL1 write */ #define CP15_ATS1CUR(rr) p15, 0, rr, c7, c8, 2 /* Stage 1 Current state unprivileged read */ #define CP15_ATS1CUW(rr) p15, 0, rr, c7, c8, 3 /* Stage 1 Current state unprivileged write */ #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_ATS12NSOPR(rr) p15, 0, rr, c7, c8, 4 /* Stages 1 and 2 Non-secure only PL1 read */ #define CP15_ATS12NSOPW(rr) p15, 0, rr, c7, c8, 5 /* Stages 1 and 2 Non-secure only PL1 write */ #define CP15_ATS12NSOUR(rr) p15, 0, rr, c7, c8, 6 /* Stages 1 and 2 Non-secure only unprivileged read */ #define CP15_ATS12NSOUW(rr) p15, 0, rr, c7, c8, 7 /* Stages 1 and 2 Non-secure only unprivileged write */ #endif #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCCALL p15, 0, r0, c7, c10, 0 /* Data cache clean all */ #endif #define CP15_DCCMVAC(rr) p15, 0, rr, c7, c10, 1 /* Data cache clean by MVA PoC */ #define CP15_DCCSW(rr) p15, 0, rr, c7, c10, 2 /* Data cache clean by set/way */ #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_CP15DSB p15, 0, r0, c7, c10, 4 /* DSB */ #define CP15_CP15DMB p15, 0, r0, c7, c10, 5 /* DMB */ #define CP15_CP15WFI p15, 0, r0, c7, c0, 4 /* WFI */ #endif #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_DCCMVAU(rr) p15, 0, rr, c7, c11, 1 /* Data cache clean by MVA PoU */ #endif #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCCIALL p15, 0, r0, c7, c14, 0 /* Data cache clean and invalidate all */ #endif #define
CP15_DCCIMVAC(rr) p15, 0, rr, c7, c14, 1 /* Data cache clean and invalidate by MVA PoC */ #define CP15_DCCISW(rr) p15, 0, rr, c7, c14, 2 /* Data cache clean and invalidate by set/way */ /* * CP15 C8 registers */ #if __ARM_ARCH >= 7 && defined(SMP) /* From ARMv7: */ #define CP15_TLBIALLIS p15, 0, r0, c8, c3, 0 /* Invalidate entire unified TLB IS */ #define CP15_TLBIMVAIS(rr) p15, 0, rr, c8, c3, 1 /* Invalidate unified TLB by MVA IS */ #define CP15_TLBIASIDIS(rr) p15, 0, rr, c8, c3, 2 /* Invalidate unified TLB by ASID IS */ #define CP15_TLBIMVAAIS(rr) p15, 0, rr, c8, c3, 3 /* Invalidate unified TLB by MVA, all ASID IS */ #endif #define CP15_TLBIALL p15, 0, r0, c8, c7, 0 /* Invalidate entire unified TLB */ #define CP15_TLBIMVA(rr) p15, 0, rr, c8, c7, 1 /* Invalidate unified TLB by MVA */ #define CP15_TLBIASID(rr) p15, 0, rr, c8, c7, 2 /* Invalidate unified TLB by ASID */ #if __ARM_ARCH >= 6 /* From ARMv6: */ #define CP15_TLBIMVAA(rr) p15, 0, rr, c8, c7, 3 /* Invalidate unified TLB by MVA, all ASID */ #endif /* * CP15 C9 registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) #define CP15_PMUSERENR(rr) p15, 0, rr, c15, c9, 0 /* Access Validation Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c15, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c15, c12, 1 /* PM Cycle Count Register */ #elif __ARM_ARCH > 6 #define CP15_L2CTLR(rr) p15, 1, rr, c9, c0, 2 /* L2 Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c9, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCNTENSET(rr) p15, 0, rr, c9, c12, 1 /* PM Count Enable Set Register */ #define CP15_PMCNTENCLR(rr) p15, 0, rr, c9, c12, 2 /* PM Count Enable Clear Register */ #define CP15_PMOVSR(rr) p15, 0, rr, c9, c12, 3 /* PM Overflow Flag Status Register */ #define CP15_PMSWINC(rr) p15, 0, rr, c9, c12, 4 /* PM Software Increment Register */ #define CP15_PMSELR(rr) p15, 0, rr, c9, c12, 5 /* PM Event Counter Selection Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c9, c13, 0 /* PM Cycle Count Register */ #define CP15_PMXEVTYPER(rr) p15, 0, rr, c9, c13, 1 /* PM Event Type Select Register */ #define CP15_PMXEVCNTRR(rr) p15, 0, rr, c9, c13, 2 /* PM Event Count Register */ #define CP15_PMUSERENR(rr) p15, 0, rr, c9, c14, 0 /* PM User Enable Register */ #define CP15_PMINTENSET(rr) p15, 0, rr, c9, c14, 1 /* PM Interrupt Enable Set Register */ #define CP15_PMINTENCLR(rr) p15, 0, rr, c9, c14, 2 /* PM Interrupt Enable Clear Register */ #endif /* * CP15 C10 registers */ /* Without LPAE this is PRRR, with LPAE it's MAIR0 */ #define CP15_PRRR(rr) p15, 0, rr, c10, c2, 0 /* Primary Region Remap Register */ #define CP15_MAIR0(rr) p15, 0, rr, c10, c2, 0 /* Memory Attribute Indirection Register 0 */ /* Without LPAE this is NMRR, with LPAE it's MAIR1 */ #define CP15_NMRR(rr) p15, 0, rr, c10, c2, 1 /* Normal Memory Remap Register */ #define CP15_MAIR1(rr) p15, 0, rr, c10, c2, 1 /* Memory Attribute Indirection Register 1 */ #define CP15_AMAIR0(rr) p15, 0, rr, c10, c3, 0 /* Auxiliary Memory Attribute Indirection Register 0 */ #define CP15_AMAIR1(rr) p15, 0, rr, c10, c3, 1 /* Auxiliary Memory Attribute Indirection Register 1 */ /* * CP15 C12 registers */ #define CP15_VBAR(rr) p15, 0, rr, c12, c0, 0 /* Vector Base Address Register */ #define CP15_MVBAR(rr) p15, 0, rr, c12, c0, 1 /* Monitor Vector Base Address Register */ #define CP15_ISR(rr) p15, 0, rr, c12, c1, 0 /* Interrupt Status Register */ /* * CP15 C13 registers */ #define CP15_FCSEIDR(rr) p15, 0, rr, c13, c0, 0 /* FCSE Process ID Register */ #define 
CP15_CONTEXTIDR(rr) p15, 0, rr, c13, c0, 1 /* Context ID Register */ #define CP15_TPIDRURW(rr) p15, 0, rr, c13, c0, 2 /* User Read/Write Thread ID Register */ #define CP15_TPIDRURO(rr) p15, 0, rr, c13, c0, 3 /* User Read-Only Thread ID Register */ #define CP15_TPIDRPRW(rr) p15, 0, rr, c13, c0, 4 /* PL1 only Thread ID Register */ /* * CP15 C14 registers * These are the Generic Timer registers and may be unallocated on some SoCs. * Only use these when you know the Generic Timer is available. */ #define CP15_CNTFRQ(rr) p15, 0, rr, c14, c0, 0 /* Counter Frequency Register */ #define CP15_CNTKCTL(rr) p15, 0, rr, c14, c1, 0 /* Timer PL1 Control Register */ #define CP15_CNTP_TVAL(rr) p15, 0, rr, c14, c2, 0 /* PL1 Physical Timer Value Register */ #define CP15_CNTP_CTL(rr) p15, 0, rr, c14, c2, 1 /* PL1 Physical Timer Control Register */ #define CP15_CNTV_TVAL(rr) p15, 0, rr, c14, c3, 0 /* Virtual Timer Value Register */ #define CP15_CNTV_CTL(rr) p15, 0, rr, c14, c3, 1 /* Virtual Timer Control Register */ #define CP15_CNTHCTL(rr) p15, 4, rr, c14, c1, 0 /* Timer PL2 Control Register */ #define CP15_CNTHP_TVAL(rr) p15, 4, rr, c14, c2, 0 /* PL2 Physical Timer Value Register */ #define CP15_CNTHP_CTL(rr) p15, 4, rr, c14, c2, 1 /* PL2 Physical Timer Control Register */ /* 64-bit registers for use with mcrr/mrrc */ #define CP15_CNTPCT(rq, rr) p15, 0, rq, rr, c14 /* Physical Count Register */ #define CP15_CNTVCT(rq, rr) p15, 1, rq, rr, c14 /* Virtual Count Register */ #define CP15_CNTP_CVAL(rq, rr) p15, 2, rq, rr, c14 /* PL1 Physical Timer Compare Value Register */ #define CP15_CNTV_CVAL(rq, rr) p15, 3, rq, rr, c14 /* Virtual Timer Compare Value Register */ #define CP15_CNTVOFF(rq, rr) p15, 4, rq, rr, c14 /* Virtual Offset Register */ #define CP15_CNTHP_CVAL(rq, rr) p15, 6, rq, rr, c14 /* PL2 Physical Timer Compare Value Register */ /* * CP15 C15 registers */ #define CP15_CBAR(rr) p15, 4, rr, c15, c0, 0 /* Configuration Base Address Register */ #endif /* !MACHINE_SYSREG_H */ Index: head/sys/conf/files.arm =================================================================== --- head/sys/conf/files.arm (revision 294739) +++ head/sys/conf/files.arm (revision 294740) @@ -1,138 +1,139 @@ # $FreeBSD$ arm/arm/autoconf.c standard arm/arm/bcopy_page.S standard arm/arm/bcopyinout.S standard arm/arm/blockio.S standard arm/arm/bus_space_asm_generic.S standard arm/arm/bus_space_base.c optional fdt arm/arm/bus_space_generic.c standard arm/arm/busdma_machdep.c optional !armv6 arm/arm/busdma_machdep-v6.c optional armv6 arm/arm/copystr.S standard arm/arm/cpufunc.c standard arm/arm/cpufunc_asm.S standard arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 arm/arm/cpufunc_asm_arm10.S optional cpu_arm9e arm/arm/cpufunc_asm_arm11.S optional cpu_arm1176 arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176 arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_xscale_80321 | cpu_xscale_pxa2x0 | cpu_xscale_ixp425 | cpu_xscale_80219 | cpu_xscale_81342 arm/arm/cpufunc_asm_armv5_ec.S optional cpu_arm9e arm/arm/cpufunc_asm_armv6.S optional cpu_arm1176 arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b arm/arm/cpufunc_asm_fa526.S optional cpu_fa526 arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e arm/arm/cpufunc_asm_xscale.S optional cpu_xscale_80321 | cpu_xscale_pxa2x0 | cpu_xscale_ixp425 | cpu_xscale_80219 | cpu_xscale_81342 arm/arm/cpufunc_asm_xscale_c3.S optional cpu_xscale_81342 arm/arm/cpuinfo.c standard arm/arm/cpu_asm-v6.S optional armv6 
arm/arm/db_disasm.c optional ddb arm/arm/db_interface.c optional ddb arm/arm/db_trace.c optional ddb +arm/arm/debug_monitor.c optional ddb armv6 arm/arm/devmap.c standard arm/arm/disassem.c optional ddb arm/arm/dump_machdep.c standard arm/arm/elf_machdep.c standard arm/arm/elf_note.S standard arm/arm/exception.S standard arm/arm/fiq.c standard arm/arm/fiq_subr.S standard arm/arm/fusu.S standard arm/arm/gdb_machdep.c optional gdb arm/arm/generic_timer.c optional generic_timer arm/arm/gic.c optional gic arm/arm/hdmi_if.m optional hdmi arm/arm/identcpu.c standard arm/arm/in_cksum.c optional inet | inet6 arm/arm/in_cksum_arm.S optional inet | inet6 arm/arm/intr.c optional !arm_intrng kern/subr_intr.c optional arm_intrng arm/arm/locore.S standard no-obj arm/arm/machdep.c standard arm/arm/machdep_intr.c standard arm/arm/mem.c optional mem arm/arm/minidump_machdep.c optional mem arm/arm/mp_machdep.c optional smp arm/arm/mpcore_timer.c optional mpcore_timer arm/arm/nexus.c standard arm/arm/ofw_machdep.c optional fdt arm/arm/physmem.c standard kern/pic_if.m optional arm_intrng arm/arm/pl190.c optional pl190 arm/arm/pl310.c optional pl310 arm/arm/platform.c optional platform arm/arm/platform_if.m optional platform arm/arm/pmap.c optional !armv6 arm/arm/pmap-v6.c optional armv6 !arm_new_pmap arm/arm/pmap-v6-new.c optional armv6 arm_new_pmap arm/arm/pmu.c optional pmu | fdt hwpmc arm/arm/sc_machdep.c optional sc arm/arm/setcpsr.S standard arm/arm/setstack.s standard arm/arm/stack_machdep.c optional ddb | stack arm/arm/stdatomic.c standard \ compile-with "${NORMAL_C:N-Wmissing-prototypes}" arm/arm/support.S standard arm/arm/swtch.S standard arm/arm/sys_machdep.c standard arm/arm/syscall.c standard arm/arm/trap.c optional !armv6 arm/arm/trap-v6.c optional armv6 arm/arm/uio_machdep.c standard arm/arm/undefined.c standard arm/arm/unwind.c optional ddb | kdtrace_hooks arm/arm/vm_machdep.c standard arm/arm/vfp.c standard board_id.h standard \ dependency "$S/arm/conf/genboardid.awk $S/arm/conf/mach-types" \ compile-with "${AWK} -f $S/arm/conf/genboardid.awk $S/arm/conf/mach-types > board_id.h" \ no-obj no-implicit-rule before-depend \ clean "board_id.h" cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/dev/dtrace/arm/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/arm/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/arm/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb dev/dwc/if_dwc.c optional dwc dev/dwc/if_dwc_if.m optional dwc dev/fb/fb.c optional sc dev/fdt/fdt_arm_platform.c optional platform fdt dev/hwpmc/hwpmc_arm.c optional hwpmc dev/hwpmc/hwpmc_armv7.c optional hwpmc armv6 dev/psci/psci.c optional psci dev/psci/psci_arm.S optional psci dev/syscons/scgfbrndr.c optional sc dev/syscons/scterm-teken.c optional sc dev/syscons/scvtb.c optional sc dev/uart/uart_cpu_fdt.c optional uart fdt font.h optional sc \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean 
"font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" kern/subr_busdma_bufalloc.c standard kern/subr_sfbuf.c standard libkern/arm/aeabi_unwind.c standard libkern/arm/divsi3.S standard libkern/arm/ffs.S standard libkern/arm/ldivmod.S standard libkern/arm/ldivmod_helper.c standard libkern/arm/memclr.S standard libkern/arm/memcpy.S standard libkern/arm/memset.S standard libkern/arm/muldi3.c standard libkern/ashldi3.c standard libkern/ashrdi3.c standard libkern/divdi3.c standard libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/lshrdi3.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard