Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/vmm/intel/vmx.c
Show First 20 Lines • Show All 182 Lines • ▼ Show 20 Lines | SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, | ||||
&pirvec, 0, "APICv posted interrupt vector"); | &pirvec, 0, "APICv posted interrupt vector"); | ||||
static struct unrhdr *vpid_unr; | static struct unrhdr *vpid_unr; | ||||
static u_int vpid_alloc_failed; | static u_int vpid_alloc_failed; | ||||
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, | SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, | ||||
&vpid_alloc_failed, 0, NULL); | &vpid_alloc_failed, 0, NULL); | ||||
/*
 * The definitions of SDT probes for VMX.
 *
 * All probes share the same leading arguments, matching the values
 * available at the top of vmx_exit_process():
 *   arg0: "struct vmx *"     - VMX softc of the virtual machine
 *   arg1: "int"              - vcpu index
 *   arg2: "struct vm_exit *" - exit information being populated
 * Probes with more than three arguments append exit-specific data
 * (exit qualification, MSR number/value, vector, fault GPA, etc.);
 * see the per-probe trailing types below.
 */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");
/* arg3: the decoded task-switch description. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");
/* arg3: the exit qualification (which CR, access type). */
SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");
/* arg3: the MSR number (guest %ecx). */
SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");
/* arg3: the MSR number; arg4: the 64-bit value being written (edx:eax). */
SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");
SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");
/* arg3: the VM-exit interruption information field. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");
SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");
/* arg3: the exception vector; arg4: the error code (0 if not valid). */
SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");
/* arg3: the faulting GPA; arg4: the EPT exit qualification. */
SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");
/* arg3: the faulting GPA in MMIO space. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");
SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");
/* arg3: the vcpu's local APIC state. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");
SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");
/* arg3: the raw, unrecognized exit reason. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");
/* arg3: the handled/unhandled disposition returned to the caller. */
SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* | |||||
* Use the last page below 4GB as the APIC access address. This address is | * Use the last page below 4GB as the APIC access address. This address is | ||||
* occupied by the boot firmware so it is guaranteed that it will not conflict | * occupied by the boot firmware so it is guaranteed that it will not conflict | ||||
* with a page in system memory. | * with a page in system memory. | ||||
*/ | */ | ||||
#define APIC_ACCESS_ADDRESS 0xFFFFF000 | #define APIC_ACCESS_ADDRESS 0xFFFFF000 | ||||
static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); | static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); | ||||
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); | static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); | ||||
▲ Show 20 Lines • Show All 1,914 Lines • ▼ Show 20 Lines | vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) | ||||
handled = UNHANDLED; | handled = UNHANDLED; | ||||
vmxctx = &vmx->ctx[vcpu]; | vmxctx = &vmx->ctx[vcpu]; | ||||
qual = vmexit->u.vmx.exit_qualification; | qual = vmexit->u.vmx.exit_qualification; | ||||
reason = vmexit->u.vmx.exit_reason; | reason = vmexit->u.vmx.exit_reason; | ||||
vmexit->exitcode = VM_EXITCODE_BOGUS; | vmexit->exitcode = VM_EXITCODE_BOGUS; | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); | |||||
/* | /* | ||||
* VM-entry failures during or after loading guest state. | * VM-entry failures during or after loading guest state. | ||||
* | * | ||||
* These VM-exits are uncommon but must be handled specially | * These VM-exits are uncommon but must be handled specially | ||||
* as most VM-exit fields are not populated as usual. | * as most VM-exit fields are not populated as usual. | ||||
*/ | */ | ||||
if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { | if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { | ||||
▲ Show 20 Lines • Show All 86 Lines • ▼ Show 20 Lines | if (ts->reason == TSR_IDT_GATE) { | ||||
vmexit->inst_length = 0; | vmexit->inst_length = 0; | ||||
if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { | if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { | ||||
ts->errcode_valid = 1; | ts->errcode_valid = 1; | ||||
ts->errcode = vmcs_idt_vectoring_err(); | ts->errcode = vmcs_idt_vectoring_err(); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; | vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; | ||||
SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); | |||||
VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " | VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " | ||||
"%s errcode 0x%016lx", ts->reason, ts->tsssel, | "%s errcode 0x%016lx", ts->reason, ts->tsssel, | ||||
ts->ext ? "external" : "internal", | ts->ext ? "external" : "internal", | ||||
((uint64_t)ts->errcode << 32) | ts->errcode_valid); | ((uint64_t)ts->errcode << 32) | ts->errcode_valid); | ||||
break; | break; | ||||
case EXIT_REASON_CR_ACCESS: | case EXIT_REASON_CR_ACCESS: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); | ||||
SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); | |||||
switch (qual & 0xf) { | switch (qual & 0xf) { | ||||
case 0: | case 0: | ||||
handled = vmx_emulate_cr0_access(vmx, vcpu, qual); | handled = vmx_emulate_cr0_access(vmx, vcpu, qual); | ||||
break; | break; | ||||
case 4: | case 4: | ||||
handled = vmx_emulate_cr4_access(vmx, vcpu, qual); | handled = vmx_emulate_cr4_access(vmx, vcpu, qual); | ||||
break; | break; | ||||
case 8: | case 8: | ||||
handled = vmx_emulate_cr8_access(vmx, vcpu, qual); | handled = vmx_emulate_cr8_access(vmx, vcpu, qual); | ||||
break; | break; | ||||
} | } | ||||
break; | break; | ||||
case EXIT_REASON_RDMSR: | case EXIT_REASON_RDMSR: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); | ||||
retu = false; | retu = false; | ||||
ecx = vmxctx->guest_rcx; | ecx = vmxctx->guest_rcx; | ||||
VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); | VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); | ||||
SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); | |||||
error = emulate_rdmsr(vmx, vcpu, ecx, &retu); | error = emulate_rdmsr(vmx, vcpu, ecx, &retu); | ||||
if (error) { | if (error) { | ||||
vmexit->exitcode = VM_EXITCODE_RDMSR; | vmexit->exitcode = VM_EXITCODE_RDMSR; | ||||
vmexit->u.msr.code = ecx; | vmexit->u.msr.code = ecx; | ||||
} else if (!retu) { | } else if (!retu) { | ||||
handled = HANDLED; | handled = HANDLED; | ||||
} else { | } else { | ||||
/* Return to userspace with a valid exitcode */ | /* Return to userspace with a valid exitcode */ | ||||
KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, | KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, | ||||
("emulate_rdmsr retu with bogus exitcode")); | ("emulate_rdmsr retu with bogus exitcode")); | ||||
} | } | ||||
break; | break; | ||||
case EXIT_REASON_WRMSR: | case EXIT_REASON_WRMSR: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); | ||||
retu = false; | retu = false; | ||||
eax = vmxctx->guest_rax; | eax = vmxctx->guest_rax; | ||||
ecx = vmxctx->guest_rcx; | ecx = vmxctx->guest_rcx; | ||||
edx = vmxctx->guest_rdx; | edx = vmxctx->guest_rdx; | ||||
VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", | VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", | ||||
ecx, (uint64_t)edx << 32 | eax); | ecx, (uint64_t)edx << 32 | eax); | ||||
SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, | |||||
(uint64_t)edx << 32 | eax); | |||||
error = emulate_wrmsr(vmx, vcpu, ecx, | error = emulate_wrmsr(vmx, vcpu, ecx, | ||||
(uint64_t)edx << 32 | eax, &retu); | (uint64_t)edx << 32 | eax, &retu); | ||||
if (error) { | if (error) { | ||||
vmexit->exitcode = VM_EXITCODE_WRMSR; | vmexit->exitcode = VM_EXITCODE_WRMSR; | ||||
vmexit->u.msr.code = ecx; | vmexit->u.msr.code = ecx; | ||||
vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; | vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; | ||||
} else if (!retu) { | } else if (!retu) { | ||||
handled = HANDLED; | handled = HANDLED; | ||||
} else { | } else { | ||||
/* Return to userspace with a valid exitcode */ | /* Return to userspace with a valid exitcode */ | ||||
KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, | KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, | ||||
("emulate_wrmsr retu with bogus exitcode")); | ("emulate_wrmsr retu with bogus exitcode")); | ||||
} | } | ||||
break; | break; | ||||
case EXIT_REASON_HLT: | case EXIT_REASON_HLT: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); | |||||
vmexit->exitcode = VM_EXITCODE_HLT; | vmexit->exitcode = VM_EXITCODE_HLT; | ||||
vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); | vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); | ||||
if (virtual_interrupt_delivery) | if (virtual_interrupt_delivery) | ||||
vmexit->u.hlt.intr_status = | vmexit->u.hlt.intr_status = | ||||
vmcs_read(VMCS_GUEST_INTR_STATUS); | vmcs_read(VMCS_GUEST_INTR_STATUS); | ||||
else | else | ||||
vmexit->u.hlt.intr_status = 0; | vmexit->u.hlt.intr_status = 0; | ||||
break; | break; | ||||
case EXIT_REASON_MTF: | case EXIT_REASON_MTF: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); | |||||
vmexit->exitcode = VM_EXITCODE_MTRAP; | vmexit->exitcode = VM_EXITCODE_MTRAP; | ||||
vmexit->inst_length = 0; | vmexit->inst_length = 0; | ||||
break; | break; | ||||
case EXIT_REASON_PAUSE: | case EXIT_REASON_PAUSE: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); | |||||
vmexit->exitcode = VM_EXITCODE_PAUSE; | vmexit->exitcode = VM_EXITCODE_PAUSE; | ||||
break; | break; | ||||
case EXIT_REASON_INTR_WINDOW: | case EXIT_REASON_INTR_WINDOW: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); | |||||
vmx_clear_int_window_exiting(vmx, vcpu); | vmx_clear_int_window_exiting(vmx, vcpu); | ||||
return (1); | return (1); | ||||
case EXIT_REASON_EXT_INTR: | case EXIT_REASON_EXT_INTR: | ||||
/* | /* | ||||
* External interrupts serve only to cause VM exits and allow | * External interrupts serve only to cause VM exits and allow | ||||
* the host interrupt handler to run. | * the host interrupt handler to run. | ||||
* | * | ||||
* If this external interrupt triggers a virtual interrupt | * If this external interrupt triggers a virtual interrupt | ||||
* to a VM, then that state will be recorded by the | * to a VM, then that state will be recorded by the | ||||
* host interrupt handler in the VM's softc. We will inject | * host interrupt handler in the VM's softc. We will inject | ||||
* this virtual interrupt during the subsequent VM enter. | * this virtual interrupt during the subsequent VM enter. | ||||
*/ | */ | ||||
intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); | intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); | ||||
SDT_PROBE4(vmm, vmx, exit, interrupt, | |||||
vmx, vcpu, vmexit, intr_info); | |||||
/* | /* | ||||
* XXX: Ignore this exit if VMCS_INTR_VALID is not set. | * XXX: Ignore this exit if VMCS_INTR_VALID is not set. | ||||
* This appears to be a bug in VMware Fusion? | * This appears to be a bug in VMware Fusion? | ||||
*/ | */ | ||||
if (!(intr_info & VMCS_INTR_VALID)) | if (!(intr_info & VMCS_INTR_VALID)) | ||||
return (1); | return (1); | ||||
KASSERT((intr_info & VMCS_INTR_VALID) != 0 && | KASSERT((intr_info & VMCS_INTR_VALID) != 0 && | ||||
(intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, | (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, | ||||
("VM exit interruption info invalid: %#x", intr_info)); | ("VM exit interruption info invalid: %#x", intr_info)); | ||||
vmx_trigger_hostintr(intr_info & 0xff); | vmx_trigger_hostintr(intr_info & 0xff); | ||||
/* | /* | ||||
* This is special. We want to treat this as an 'handled' | * This is special. We want to treat this as an 'handled' | ||||
* VM-exit but not increment the instruction pointer. | * VM-exit but not increment the instruction pointer. | ||||
*/ | */ | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); | ||||
return (1); | return (1); | ||||
case EXIT_REASON_NMI_WINDOW: | case EXIT_REASON_NMI_WINDOW: | ||||
SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); | |||||
/* Exit to allow the pending virtual NMI to be injected */ | /* Exit to allow the pending virtual NMI to be injected */ | ||||
if (vm_nmi_pending(vmx->vm, vcpu)) | if (vm_nmi_pending(vmx->vm, vcpu)) | ||||
vmx_inject_nmi(vmx, vcpu); | vmx_inject_nmi(vmx, vcpu); | ||||
vmx_clear_nmi_window_exiting(vmx, vcpu); | vmx_clear_nmi_window_exiting(vmx, vcpu); | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); | ||||
return (1); | return (1); | ||||
case EXIT_REASON_INOUT: | case EXIT_REASON_INOUT: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); | ||||
Show All 11 Lines | if (vmexit->u.inout.string) { | ||||
vmx_paging_info(&vis->paging); | vmx_paging_info(&vis->paging); | ||||
vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); | vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); | ||||
vis->cr0 = vmcs_read(VMCS_GUEST_CR0); | vis->cr0 = vmcs_read(VMCS_GUEST_CR0); | ||||
vis->index = inout_str_index(vmx, vcpu, in); | vis->index = inout_str_index(vmx, vcpu, in); | ||||
vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); | vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); | ||||
vis->addrsize = inout_str_addrsize(inst_info); | vis->addrsize = inout_str_addrsize(inst_info); | ||||
inout_str_seginfo(vmx, vcpu, inst_info, in, vis); | inout_str_seginfo(vmx, vcpu, inst_info, in, vis); | ||||
} | } | ||||
SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); | |||||
break; | break; | ||||
case EXIT_REASON_CPUID: | case EXIT_REASON_CPUID: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); | ||||
SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); | |||||
handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); | handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); | ||||
break; | break; | ||||
case EXIT_REASON_EXCEPTION: | case EXIT_REASON_EXCEPTION: | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); | ||||
intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); | intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); | ||||
KASSERT((intr_info & VMCS_INTR_VALID) != 0, | KASSERT((intr_info & VMCS_INTR_VALID) != 0, | ||||
("VM exit interruption info invalid: %#x", intr_info)); | ("VM exit interruption info invalid: %#x", intr_info)); | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | case EXIT_REASON_EXCEPTION: | ||||
/* Reflect all other exceptions back into the guest */ | /* Reflect all other exceptions back into the guest */ | ||||
errcode_valid = errcode = 0; | errcode_valid = errcode = 0; | ||||
if (intr_info & VMCS_INTR_DEL_ERRCODE) { | if (intr_info & VMCS_INTR_DEL_ERRCODE) { | ||||
errcode_valid = 1; | errcode_valid = 1; | ||||
errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); | errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); | ||||
} | } | ||||
VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " | VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " | ||||
"the guest", intr_vec, errcode); | "the guest", intr_vec, errcode); | ||||
SDT_PROBE5(vmm, vmx, exit, exception, | |||||
vmx, vcpu, vmexit, intr_vec, errcode); | |||||
error = vm_inject_exception(vmx->vm, vcpu, intr_vec, | error = vm_inject_exception(vmx->vm, vcpu, intr_vec, | ||||
errcode_valid, errcode, 0); | errcode_valid, errcode, 0); | ||||
KASSERT(error == 0, ("%s: vm_inject_exception error %d", | KASSERT(error == 0, ("%s: vm_inject_exception error %d", | ||||
__func__, error)); | __func__, error)); | ||||
return (1); | return (1); | ||||
case EXIT_REASON_EPT_FAULT: | case EXIT_REASON_EPT_FAULT: | ||||
/* | /* | ||||
* If 'gpa' lies within the address space allocated to | * If 'gpa' lies within the address space allocated to | ||||
* memory then this must be a nested page fault otherwise | * memory then this must be a nested page fault otherwise | ||||
* this must be an instruction that accesses MMIO space. | * this must be an instruction that accesses MMIO space. | ||||
*/ | */ | ||||
gpa = vmcs_gpa(); | gpa = vmcs_gpa(); | ||||
if (vm_mem_allocated(vmx->vm, vcpu, gpa) || | if (vm_mem_allocated(vmx->vm, vcpu, gpa) || | ||||
apic_access_fault(vmx, vcpu, gpa)) { | apic_access_fault(vmx, vcpu, gpa)) { | ||||
vmexit->exitcode = VM_EXITCODE_PAGING; | vmexit->exitcode = VM_EXITCODE_PAGING; | ||||
vmexit->inst_length = 0; | vmexit->inst_length = 0; | ||||
vmexit->u.paging.gpa = gpa; | vmexit->u.paging.gpa = gpa; | ||||
vmexit->u.paging.fault_type = ept_fault_type(qual); | vmexit->u.paging.fault_type = ept_fault_type(qual); | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); | ||||
SDT_PROBE5(vmm, vmx, exit, nestedfault, | |||||
vmx, vcpu, vmexit, gpa, qual); | |||||
} else if (ept_emulation_fault(qual)) { | } else if (ept_emulation_fault(qual)) { | ||||
vmexit_inst_emul(vmexit, gpa, vmcs_gla()); | vmexit_inst_emul(vmexit, gpa, vmcs_gla()); | ||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); | ||||
SDT_PROBE4(vmm, vmx, exit, mmiofault, | |||||
vmx, vcpu, vmexit, gpa); | |||||
} | } | ||||
/* | /* | ||||
* If Virtual NMIs control is 1 and the VM-exit is due to an | * If Virtual NMIs control is 1 and the VM-exit is due to an | ||||
* EPT fault during the execution of IRET then we must restore | * EPT fault during the execution of IRET then we must restore | ||||
* the state of "virtual-NMI blocking" before resuming. | * the state of "virtual-NMI blocking" before resuming. | ||||
* | * | ||||
* See description of "NMI unblocking due to IRET" in | * See description of "NMI unblocking due to IRET" in | ||||
* "Exit Qualification for EPT Violations". | * "Exit Qualification for EPT Violations". | ||||
*/ | */ | ||||
if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && | if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && | ||||
(qual & EXIT_QUAL_NMIUDTI) != 0) | (qual & EXIT_QUAL_NMIUDTI) != 0) | ||||
vmx_restore_nmi_blocking(vmx, vcpu); | vmx_restore_nmi_blocking(vmx, vcpu); | ||||
break; | break; | ||||
case EXIT_REASON_VIRTUALIZED_EOI: | case EXIT_REASON_VIRTUALIZED_EOI: | ||||
vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; | vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; | ||||
vmexit->u.ioapic_eoi.vector = qual & 0xFF; | vmexit->u.ioapic_eoi.vector = qual & 0xFF; | ||||
SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); | |||||
vmexit->inst_length = 0; /* trap-like */ | vmexit->inst_length = 0; /* trap-like */ | ||||
break; | break; | ||||
case EXIT_REASON_APIC_ACCESS: | case EXIT_REASON_APIC_ACCESS: | ||||
SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); | |||||
handled = vmx_handle_apic_access(vmx, vcpu, vmexit); | handled = vmx_handle_apic_access(vmx, vcpu, vmexit); | ||||
break; | break; | ||||
case EXIT_REASON_APIC_WRITE: | case EXIT_REASON_APIC_WRITE: | ||||
/* | /* | ||||
* APIC-write VM exit is trap-like so the %rip is already | * APIC-write VM exit is trap-like so the %rip is already | ||||
* pointing to the next instruction. | * pointing to the next instruction. | ||||
*/ | */ | ||||
vmexit->inst_length = 0; | vmexit->inst_length = 0; | ||||
vlapic = vm_lapic(vmx->vm, vcpu); | vlapic = vm_lapic(vmx->vm, vcpu); | ||||
SDT_PROBE4(vmm, vmx, exit, apicwrite, | |||||
vmx, vcpu, vmexit, vlapic); | |||||
handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); | handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); | ||||
break; | break; | ||||
case EXIT_REASON_XSETBV: | case EXIT_REASON_XSETBV: | ||||
SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); | |||||
handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); | handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); | ||||
break; | break; | ||||
case EXIT_REASON_MONITOR: | case EXIT_REASON_MONITOR: | ||||
SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); | |||||
vmexit->exitcode = VM_EXITCODE_MONITOR; | vmexit->exitcode = VM_EXITCODE_MONITOR; | ||||
break; | break; | ||||
case EXIT_REASON_MWAIT: | case EXIT_REASON_MWAIT: | ||||
SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); | |||||
vmexit->exitcode = VM_EXITCODE_MWAIT; | vmexit->exitcode = VM_EXITCODE_MWAIT; | ||||
break; | break; | ||||
default: | default: | ||||
SDT_PROBE4(vmm, vmx, exit, unknown, | |||||
vmx, vcpu, vmexit, reason); | |||||
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); | vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); | ||||
break; | break; | ||||
} | } | ||||
if (handled) { | if (handled) { | ||||
/* | /* | ||||
* It is possible that control is returned to userland | * It is possible that control is returned to userland | ||||
* even though we were able to handle the VM exit in the | * even though we were able to handle the VM exit in the | ||||
Show All 19 Lines | if (vmexit->exitcode == VM_EXITCODE_BOGUS) { | ||||
vmexit->u.vmx.inst_error = 0; | vmexit->u.vmx.inst_error = 0; | ||||
} else { | } else { | ||||
/* | /* | ||||
* The exitcode and collateral have been populated. | * The exitcode and collateral have been populated. | ||||
* The VM exit will be processed further in userland. | * The VM exit will be processed further in userland. | ||||
*/ | */ | ||||
} | } | ||||
} | } | ||||
SDT_PROBE4(vmm, vmx, exit, return, | |||||
vmx, vcpu, vmexit, handled); | |||||
return (handled); | return (handled); | ||||
} | } | ||||
static __inline void | static __inline void | ||||
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) | vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) | ||||
{ | { | ||||
KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, | KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, | ||||
▲ Show 20 Lines • Show All 1,020 Lines • Show Last 20 Lines |