diff --git a/usr.sbin/bhyve/aarch64/Makefile.inc b/usr.sbin/bhyve/aarch64/Makefile.inc
--- a/usr.sbin/bhyve/aarch64/Makefile.inc
+++ b/usr.sbin/bhyve/aarch64/Makefile.inc
@@ -7,3 +7,4 @@
 SRCS+= vmm_instruction_emul.c
 
 BHYVE_FDT_SUPPORT=
+BHYVE_GDB_SUPPORT=
diff --git a/usr.sbin/bhyve/aarch64/bhyverun_machdep.c b/usr.sbin/bhyve/aarch64/bhyverun_machdep.c
--- a/usr.sbin/bhyve/aarch64/bhyverun_machdep.c
+++ b/usr.sbin/bhyve/aarch64/bhyverun_machdep.c
@@ -99,6 +99,7 @@
 	    " -C: include guest memory in core file\n"
 	    " -c: number of CPUs and/or topology specification\n"
 	    " -D: destroy on power-off\n"
+	    " -G: start a debug server\n"
 	    " -h: help\n"
 	    " -k: key=value flat config file\n"
 	    " -m: memory size\n"
@@ -119,7 +120,7 @@
 	const char *optstr;
 	int c;
 
-	optstr = "hCDSWk:f:o:p:c:s:m:U:";
+	optstr = "hCDSWk:f:o:p:G:c:s:m:U:";
 	while ((c = getopt(argc, argv, optstr)) != -1) {
 		switch (c) {
 		case 'c':
@@ -134,6 +135,9 @@
 		case 'D':
 			set_config_bool("destroy_on_poweroff", true);
 			break;
+		case 'G':
+			bhyve_parse_gdb_options(optarg);
+			break;
 		case 'k':
 			bhyve_parse_simple_config_file(optarg);
 			break;
diff --git a/usr.sbin/bhyve/aarch64/vmexit.c b/usr.sbin/bhyve/aarch64/vmexit.c
--- a/usr.sbin/bhyve/aarch64/vmexit.c
+++ b/usr.sbin/bhyve/aarch64/vmexit.c
@@ -49,6 +49,7 @@
 #include "bhyverun.h"
 #include "config.h"
 #include "debug.h"
+#include "gdb.h"
 #include "mem.h"
 #include "vmexit.h"
 
@@ -112,9 +113,10 @@
 }
 
 static int
-vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
     struct vm_run *vmrun __unused)
 {
+	gdb_cpu_suspend(vcpu);
 	return (VMEXIT_CONTINUE);
 }
 
@@ -250,6 +252,20 @@
 	return (VMEXIT_ABORT);
 }
 
+static int
+vmexit_brk(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
+{
+	gdb_cpu_breakpoint(vcpu, vmrun->vm_exit);
+	return (VMEXIT_CONTINUE);
+}
+
+static int
+vmexit_ss(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_run *vmrun)
+{
+	gdb_cpu_debug(vcpu, vmrun->vm_exit);
+	return (VMEXIT_CONTINUE);
+}
+
 const vmexit_handler_t vmexit_handlers[VM_EXITCODE_MAX] = {
 	[VM_EXITCODE_BOGUS] = vmexit_bogus,
 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
@@ -257,4 +273,6 @@
 	[VM_EXITCODE_DEBUG] = vmexit_debug,
 	[VM_EXITCODE_SMCCC] = vmexit_smccc,
 	[VM_EXITCODE_HYP] = vmexit_hyp,
+	[VM_EXITCODE_BRK] = vmexit_brk,
+	[VM_EXITCODE_SS] = vmexit_ss,
 };
diff --git a/usr.sbin/bhyve/gdb.c b/usr.sbin/bhyve/gdb.c
--- a/usr.sbin/bhyve/gdb.c
+++ b/usr.sbin/bhyve/gdb.c
@@ -36,10 +36,17 @@
 #include 
 #include 
+#ifdef __aarch64__
+#include 
+#endif
 #include 
+#ifdef __amd64__
 #include 
+#endif
 #include 
+
 #include 
+
 #include 
 
 #ifndef WITHOUT_CAPSICUM
 #include 
@@ -73,9 +80,19 @@
  */
 #define GDB_SIGNAL_TRAP 5
 
+#if defined(__amd64__)
 #define GDB_BP_SIZE 1
 #define GDB_BP_INSTR (uint8_t []){0xcc}
 #define GDB_PC_REGNAME VM_REG_GUEST_RIP
+#define GDB_BREAKPOINT_CAP VM_CAP_BPT_EXIT
+#elif defined(__aarch64__)
+#define GDB_BP_SIZE 4
+#define GDB_BP_INSTR (uint8_t []){0x00, 0x00, 0x20, 0xd4}
+#define GDB_PC_REGNAME VM_REG_GUEST_PC
+#define GDB_BREAKPOINT_CAP VM_CAP_BRK_EXIT
+#else
+#error "Unsupported architecture"
+#endif
 
 _Static_assert(sizeof(GDB_BP_INSTR) == GDB_BP_SIZE,
     "GDB_BP_INSTR has wrong size");
@@ -146,10 +163,13 @@
 static int cur_vcpu, stopped_vcpu;
 static bool gdb_active = false;
 
-static const struct gdb_reg {
+struct gdb_reg {
 	enum vm_reg_name id;
 	int size;
-} gdb_regset[] = {
+};
+
+#ifdef __amd64__
+static const struct gdb_reg gdb_regset[] = {
 	{ .id = VM_REG_GUEST_RAX, .size = 8 },
 	{ .id = VM_REG_GUEST_RBX, .size = 8 },
 	{ .id = VM_REG_GUEST_RCX, .size = 8 },
@@ -191,6 +211,44 @@
 	{ .id = VM_REG_GUEST_TPR, .size = 8 },
 	{ .id = VM_REG_GUEST_EFER, .size = 8 },
 };
+#else /* __aarch64__ */
+static const struct gdb_reg gdb_regset[] = {
+	{ .id = VM_REG_GUEST_X0, .size = 8 },
+	{ .id = VM_REG_GUEST_X1, .size = 8 },
+	{ .id = VM_REG_GUEST_X2, .size = 8 },
+	{ .id = VM_REG_GUEST_X3, .size = 8 },
+	{ .id = VM_REG_GUEST_X4, .size = 8 },
+	{ .id = VM_REG_GUEST_X5, .size = 8 },
+	{ .id = VM_REG_GUEST_X6, .size = 8 },
+	{ .id = VM_REG_GUEST_X7, .size = 8 },
+	{ .id = VM_REG_GUEST_X8, .size = 8 },
+	{ .id = VM_REG_GUEST_X9, .size = 8 },
+	{ .id = VM_REG_GUEST_X10, .size = 8 },
+	{ .id = VM_REG_GUEST_X11, .size = 8 },
+	{ .id = VM_REG_GUEST_X12, .size = 8 },
+	{ .id = VM_REG_GUEST_X13, .size = 8 },
+	{ .id = VM_REG_GUEST_X14, .size = 8 },
+	{ .id = VM_REG_GUEST_X15, .size = 8 },
+	{ .id = VM_REG_GUEST_X16, .size = 8 },
+	{ .id = VM_REG_GUEST_X17, .size = 8 },
+	{ .id = VM_REG_GUEST_X18, .size = 8 },
+	{ .id = VM_REG_GUEST_X19, .size = 8 },
+	{ .id = VM_REG_GUEST_X20, .size = 8 },
+	{ .id = VM_REG_GUEST_X21, .size = 8 },
+	{ .id = VM_REG_GUEST_X22, .size = 8 },
+	{ .id = VM_REG_GUEST_X23, .size = 8 },
+	{ .id = VM_REG_GUEST_X24, .size = 8 },
+	{ .id = VM_REG_GUEST_X25, .size = 8 },
+	{ .id = VM_REG_GUEST_X26, .size = 8 },
+	{ .id = VM_REG_GUEST_X27, .size = 8 },
+	{ .id = VM_REG_GUEST_X28, .size = 8 },
+	{ .id = VM_REG_GUEST_X29, .size = 8 },
+	{ .id = VM_REG_GUEST_LR, .size = 8 },
+	{ .id = VM_REG_GUEST_SP, .size = 8 },
+	{ .id = VM_REG_GUEST_PC, .size = 8 },
+	{ .id = VM_REG_GUEST_CPSR, .size = 4 },
+};
+#endif
 
 #ifdef GDB_LOG
 #include 
@@ -228,6 +286,7 @@
 static int
 guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
 {
+#ifdef __amd64__
 	uint64_t regs[4];
 	const int regset[4] = {
 		VM_REG_GUEST_CR0,
@@ -262,6 +321,31 @@
 	else
 		paging->paging_mode = PAGING_MODE_PAE;
 	return (0);
+#else /* __aarch64__ */
+	uint64_t regs[6];
+	const int regset[6] = {
+		VM_REG_GUEST_TTBR0_EL1,
+		VM_REG_GUEST_TTBR1_EL1,
+		VM_REG_GUEST_TCR_EL1,
+		VM_REG_GUEST_TCR2_EL1,
+		VM_REG_GUEST_SCTLR_EL1,
+		VM_REG_GUEST_CPSR,
+	};
+
+	if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
+		return (-1);
+
+	memset(paging, 0, sizeof(*paging));
+	paging->ttbr0_addr = regs[0] & ~(TTBR_ASID_MASK | TTBR_CnP);
+	paging->ttbr1_addr = regs[1] & ~(TTBR_ASID_MASK | TTBR_CnP);
+	paging->tcr_el1 = regs[2];
+	paging->tcr2_el1 = regs[3];
+	paging->flags = regs[5] & (PSR_M_MASK | PSR_M_32);
+	if ((regs[4] & SCTLR_M) != 0)
+		paging->flags |= VM_GP_MMU_ENABLED;
+
+	return (0);
+#endif /* __aarch64__ */
 }
 
 /*
@@ -294,7 +378,11 @@
 static uint64_t
 guest_pc(struct vm_exit *vme)
 {
+#ifdef __amd64__
 	return (vme->rip);
+#else /* __aarch64__ */
+	return (vme->pc);
+#endif
 }
 
 static void
@@ -762,6 +850,7 @@
 {
 	int error;
 
+#ifdef __amd64__
 	/*
 	 * If the MTRAP cap fails, we are running on an AMD host.
 	 * In that case, we request DB exits caused by RFLAGS.TF.
@@ -771,23 +860,31 @@
 	error = vm_set_capability(vcpu, VM_CAP_RFLAGS_TF, val);
 	if (error == 0)
 		(void)vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
-
+#else /* __aarch64__ */
+	error = vm_set_capability(vcpu, VM_CAP_SS_EXIT, val);
+	if (error == 0)
+		error = vm_set_capability(vcpu, VM_CAP_MASK_HWINTR, val);
+#endif
 	return (error);
 }
 
 /*
- * Checks whether single-stepping is enabled for a given vCPU.
+ * Checks whether single-stepping is supported for a given vCPU.
  */
 static int
 _gdb_check_step(struct vcpu *vcpu)
 {
+#ifdef __amd64__
 	int val;
 
 	if (vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val) != 0) {
 		if (vm_get_capability(vcpu, VM_CAP_RFLAGS_TF, &val) != 0)
-			return -1;
+			return (-1);
 	}
-	return 0;
+#else /* __aarch64__ */
+	(void)vcpu;
+#endif
+	return (0);
 }
 
 /*
@@ -809,7 +906,7 @@
 	vcpus[vcpuid] = vcpu;
 	CPU_SET(vcpuid, &vcpus_active);
 	if (!TAILQ_EMPTY(&breakpoints)) {
-		vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
+		vm_set_capability(vcpu, GDB_BREAKPOINT_CAP, 1);
 		debug("$vCPU %d enabled breakpoint exits\n", vcpuid);
 	}
 
@@ -912,7 +1009,7 @@
 }
 
 /*
- * A general handler for VM_EXITCODE_DB.
+ * A general handler for single-step exceptions.
  * Handles RFLAGS.TF exits on AMD SVM.
  */
 void
@@ -921,10 +1018,15 @@
 	if (!gdb_active)
 		return;
 
+#ifdef __amd64__
 	/* RFLAGS.TF exit? */
 	if (vmexit->u.dbg.trace_trap) {
 		gdb_cpu_step(vcpu);
 	}
+#else /* __aarch64__ */
+	(void)vmexit;
+	gdb_cpu_step(vcpu);
+#endif
 }
 
 /*
@@ -998,11 +1100,19 @@
 	} else {
 		debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
 		    guest_pc(vmexit));
+#ifdef __amd64__
 		error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
 		    vmexit->u.bpt.inst_length);
 		assert(error == 0);
 		error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
 		assert(error == 0);
+#else /* __aarch64__ */
+		uint64_t esr;
+
+		esr = (EXCP_BRK << ESR_ELx_EC_SHIFT) | vmexit->u.hyp.esr_el2;
+		error = vm_inject_exception(vcpu, esr, 0);
+		assert(error == 0);
+#endif
 	}
 	pthread_mutex_unlock(&gdb_lock);
 }
@@ -1053,8 +1163,10 @@
 
 	start_packet();
 	for (size_t i = 0; i < nitems(gdb_regset); i++) {
+#ifdef GDB_REG_FIRST_EXT
 		if (gdb_regset[i].id == GDB_REG_FIRST_EXT)
 			break;
+#endif
 		append_unsigned_native(regvals[i], gdb_regset[i].size);
 	}
 	finish_packet();
@@ -1318,7 +1430,7 @@
 	while (!CPU_EMPTY(&mask)) {
 		vcpu = CPU_FFS(&mask) - 1;
 		CPU_CLR(vcpu, &mask);
-		if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
+		if (vm_set_capability(vcpus[vcpu], GDB_BREAKPOINT_CAP,
 		    enable ? 1 : 0) < 0)
 			return (false);
 		debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
@@ -1327,6 +1439,20 @@
 	return (true);
 }
 
+static void
+write_instr(uint8_t *dest, uint8_t *instr, size_t len)
+{
+	memcpy(dest, instr, len);
+#ifdef __aarch64__
+	__asm __volatile(
+	    "dc cvau, %0\n"
+	    "dsb ish\n"
+	    "ic ialluis\n"
+	    "dsb ish\n"
+	    : : "r" (dest) : "memory");
+#endif
+}
+
 static void
 remove_all_sw_breakpoints(void)
 {
@@ -1339,7 +1465,7 @@
 	TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
 		debug("remove breakpoint at %#lx\n", bp->gpa);
 		cp = paddr_guest2host(ctx, bp->gpa, sizeof(bp->shadow_inst));
-		memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
+		write_instr(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
 		TAILQ_REMOVE(&breakpoints, bp, link);
 		free(bp);
 	}
@@ -1395,14 +1521,15 @@
 			bp = malloc(sizeof(*bp));
 			bp->gpa = gpa;
 			memcpy(bp->shadow_inst, cp, sizeof(bp->shadow_inst));
-			memcpy(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
+			write_instr(cp, GDB_BP_INSTR, sizeof(bp->shadow_inst));
 			TAILQ_INSERT_TAIL(&breakpoints, bp, link);
 			debug("new breakpoint at %#lx\n", gpa);
 		}
 	} else {
 		if (bp != NULL) {
 			debug("remove breakpoint at %#lx\n", gpa);
-			memcpy(cp, bp->shadow_inst, sizeof(bp->shadow_inst));
+			write_instr(cp, bp->shadow_inst,
+			    sizeof(bp->shadow_inst));
 			TAILQ_REMOVE(&breakpoints, bp, link);
 			free(bp);
 			if (TAILQ_EMPTY(&breakpoints))
diff --git a/usr.sbin/bhyve/gdb/Makefile b/usr.sbin/bhyve/gdb/Makefile
--- a/usr.sbin/bhyve/gdb/Makefile
+++ b/usr.sbin/bhyve/gdb/Makefile
@@ -6,6 +6,9 @@
 .if ${MACHINE_ARCH} == "amd64"
 XMLARCH= i386:x86-64
 FILES+= amd64.xml
+.elif ${MACHINE_ARCH} == "aarch64"
+XMLARCH= aarch64
+FILES+= aarch64-core.xml
 .endif
 
 .if !make(install*)
diff --git a/usr.sbin/bhyve/gdb/aarch64-core.xml b/usr.sbin/bhyve/gdb/aarch64-core.xml
new file mode 100644
--- /dev/null
+++ b/usr.sbin/bhyve/gdb/aarch64-core.xml
@@ -0,0 +1,46 @@
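For reference, a standalone sketch (not part of the patch, purely illustrative) of why the aarch64 GDB_BP_INSTR above is the byte sequence {0x00, 0x00, 0x20, 0xd4}: those bytes are the little-endian encoding of the AArch64 "brk #0" instruction (0xd4200000), the arm64 counterpart of the single-byte int3 (0xcc) breakpoint used on amd64.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Byte sequence used for GDB_BP_INSTR in the aarch64 branch above. */
	const uint8_t bp_instr[4] = { 0x00, 0x00, 0x20, 0xd4 };
	uint32_t insn;

	/* Assemble the 32-bit instruction word from its little-endian bytes. */
	insn = (uint32_t)bp_instr[0] | (uint32_t)bp_instr[1] << 8 |
	    (uint32_t)bp_instr[2] << 16 | (uint32_t)bp_instr[3] << 24;
	assert(insn == 0xd4200000u);	/* A64 "brk #0" */
	return (0);
}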