diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -497,6 +497,7 @@
 	VM_CAP_BPT_EXIT,
 	VM_CAP_RDPID,
 	VM_CAP_RDTSCP,
 	VM_CAP_IPI_EXIT,
+	VM_CAP_SSTEP_MASK_HWINTR,
 	VM_CAP_MAX
 };
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -126,6 +126,10 @@
 	GUEST_MSR_NUM		/* must be the last enumeration */
 };
 
+struct vmxdbg {
+	int shadow_if;
+};
+
 struct vmx_vcpu {
 	struct vmx *vmx;
 	struct vcpu *vcpu;
@@ -136,6 +140,7 @@
 	struct vmxctx ctx;
 	struct vmxcap cap;
 	struct vmxstate state;
+	struct vmxdbg dbg;
 	struct vm_mtrr mtrr;
 	int vcpuid;
 };
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -3628,6 +3628,29 @@
 		vlapic = vm_lapic(vcpu->vcpu);
 		vlapic->ipi_exit = val;
 		break;
+	case VM_CAP_SSTEP_MASK_HWINTR: {
+		uint64_t rflags;
+
+		retval = 0;
+		error = vmx_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
+		KASSERT(error == 0,
+		    ("%s: vmx_getreg error %d", __func__, error));
+
+		if (val) {
+			/* Save current IF bit and disable interrupts. */
+			vcpu->dbg.shadow_if = rflags & PSL_I;
+			rflags &= ~PSL_I;
+		} else {
+			/* Restore shadowed IF bit. */
+			rflags &= ~PSL_I;
+			rflags |= vcpu->dbg.shadow_if;
+		}
+
+		error = vmx_setreg(vcpu, VM_REG_GUEST_RFLAGS, rflags);
+		KASSERT(error == 0,
+		    ("%s: vmx_setreg error %d", __func__, error));
+		break;
+	}
 	default:
 		break;
 	}
diff --git a/usr.sbin/bhyve/gdb.c b/usr.sbin/bhyve/gdb.c
--- a/usr.sbin/bhyve/gdb.c
+++ b/usr.sbin/bhyve/gdb.c
@@ -801,6 +801,9 @@
 	if (vs->stepping) {
 		error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
 		assert(error == 0);
+
+		error = vm_set_capability(vcpu, VM_CAP_SSTEP_MASK_HWINTR, 1);
+		assert(error == 0);
 	}
 }
 
@@ -853,6 +856,8 @@
 		vs->stepping = false;
 		vs->stepped = true;
 		vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
+		vm_set_capability(vcpu, VM_CAP_SSTEP_MASK_HWINTR, 0);
+
 		while (vs->stepped) {
 			if (stopped_vcpu == -1) {
 				debug("$vCPU %d reporting step\n", vcpuid);
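
For context, a minimal sketch (not part of the change) of how a debug stub is expected to drive the new capability, mirroring the two gdb.c hunks above. It assumes the vcpu-handle flavor of vm_set_capability() from libvmmapi that gdb.c already uses; the single_step() helper, its elided resume/wait step, and the assert-based error handling are illustrative only.

#include <assert.h>

#include <machine/vmm.h>

#include <vmmapi.h>

/*
 * Sketch: wrap a single step with VM_CAP_SSTEP_MASK_HWINTR so that the
 * step lands on the next guest instruction instead of inside an
 * external-interrupt handler.  Enabling the capability shadows and
 * clears the guest's RFLAGS.IF; disabling it restores the shadowed bit.
 */
static void
single_step(struct vcpu *vcpu)
{
	int error;

	/* Trap back to the debugger after one instruction... */
	error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
	assert(error == 0);
	/* ...and keep hardware interrupts masked for the duration. */
	error = vm_set_capability(vcpu, VM_CAP_SSTEP_MASK_HWINTR, 1);
	assert(error == 0);

	/* (resume the vCPU here and wait for the MTRAP exit) */

	/* Step done: drop both capabilities; the guest's IF is restored. */
	error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
	assert(error == 0);
	error = vm_set_capability(vcpu, VM_CAP_SSTEP_MASK_HWINTR, 0);
	assert(error == 0);
}

Shadowing the IF bit in struct vmxdbg, rather than unconditionally setting PSL_I when the capability is cleared, means a guest that is stepped while it has interrupts disabled does not come out of the step with interrupts spuriously enabled.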