Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -886,6 +886,7 @@
 
 	gd = &idt[vector];
 
+	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
 	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
 	    "invalid vector %d", vector));
 	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
@@ -2164,6 +2165,33 @@
 	return (UNHANDLED);
 }
 
+/*
+ * If the NMI-exiting VM execution control is set to '1' then an NMI in
+ * non-root operation causes a VM-exit. NMI blocking is in effect so it is
+ * sufficient to simply vector to the NMI handler via a software interrupt.
+ * However, this must be done before maskable interrupts are enabled
+ * otherwise the "iret" issued by an interrupt handler will incorrectly
+ * clear NMI blocking.
+ */
+static void
+vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
+{
+	uint32_t intr_info;
+
+	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
+
+	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
+	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
+	    ("VM exit interruption info invalid: %#x", intr_info));
+
+	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
+		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
+		    "to NMI has invalid vector: %#x", intr_info));
+		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
+		__asm __volatile("int $2");
+	}
+}
+
 static enum task_switch_reason
 vmx_task_switch_reason(uint64_t qual)
 {
@@ -2237,6 +2265,7 @@
 	uint64_t exitintinfo, qual, gpa;
 	bool retu;
 
+	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
 
@@ -2258,10 +2287,17 @@
 	 */
 	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
 		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
+		enable_intr();
 		__asm __volatile("int $18");
 		return (1);
 	}
 
+	/*
+	 * Handle NMIs before enabling interrupts.
+	 */
+	if (reason == EXIT_REASON_EXCEPTION)
+		vmx_exit_handle_nmi(vmx, vcpu, vmexit);
+
 	/*
 	 * VM exits that can be triggered during event delivery need to
 	 * be handled specially by re-injecting the event if the IDT
@@ -2312,6 +2348,47 @@
 		}
 	}
 
+	/*
+	 * Handle external interrupts before enabling interrupts, else
+	 * preemption may defer interrupt handling indefinitely.
+	 */
+	if (reason == EXIT_REASON_EXT_INTR) {
+		/*
+		 * External interrupts serve only to cause VM exits and allow
+		 * the host interrupt handler to run.
+		 *
+		 * If this external interrupt triggers a virtual interrupt
+		 * to a VM, then that state will be recorded by the
+		 * host interrupt handler in the VM's softc. We will inject
+		 * this virtual interrupt during the subsequent VM enter.
+		 */
+		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
+		SDT_PROBE4(vmm, vmx, exit, interrupt,
+		    vmx, vcpu, vmexit, intr_info);
+
+		/*
+		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
+		 * This appears to be a bug in VMware Fusion?
+		 */
+		if (!(intr_info & VMCS_INTR_VALID)) {
+			enable_intr();
+			return (1);
+		}
+		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
+		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
+		    ("VM exit interruption info invalid: %#x", intr_info));
+		vmx_trigger_hostintr(intr_info & 0xff);
+		enable_intr();
+
+		/*
+		 * This is special. We want to treat this as an 'handled'
+		 * VM-exit but not increment the instruction pointer.
+		 */
+		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
+		return (1);
+	}
+	enable_intr();
+
 	switch (reason) {
 	case EXIT_REASON_TASK_SWITCH:
 		ts = &vmexit->u.task_switch;
@@ -2441,37 +2518,6 @@
 		SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
 		vmx_clear_int_window_exiting(vmx, vcpu);
 		return (1);
-	case EXIT_REASON_EXT_INTR:
-		/*
-		 * External interrupts serve only to cause VM exits and allow
-		 * the host interrupt handler to run.
-		 *
-		 * If this external interrupt triggers a virtual interrupt
-		 * to a VM, then that state will be recorded by the
-		 * host interrupt handler in the VM's softc. We will inject
-		 * this virtual interrupt during the subsequent VM enter.
-		 */
-		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
-		SDT_PROBE4(vmm, vmx, exit, interrupt,
-		    vmx, vcpu, vmexit, intr_info);
-
-		/*
-		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
-		 * This appears to be a bug in VMware Fusion?
-		 */
-		if (!(intr_info & VMCS_INTR_VALID))
-			return (1);
-		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
-		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
-		    ("VM exit interruption info invalid: %#x", intr_info));
-		vmx_trigger_hostintr(intr_info & 0xff);
-
-		/*
-		 * This is special. We want to treat this as an 'handled'
-		 * VM-exit but not increment the instruction pointer.
-		 */
-		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
-		return (1);
 	case EXIT_REASON_NMI_WINDOW:
 		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
 		/* Exit to allow the pending virtual NMI to be injected */
@@ -2706,10 +2752,13 @@
 static int
 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
 {
+	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
 	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
 	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
 	    vmxctx->inst_fail_status));
 
+	enable_intr();
+
 	vmexit->inst_length = 0;
 	vmexit->exitcode = VM_EXITCODE_VMX;
 	vmexit->u.vmx.status = vmxctx->inst_fail_status;
@@ -2728,36 +2777,6 @@
 	}
 }
 
-/*
- * If the NMI-exiting VM execution control is set to '1' then an NMI in
- * non-root operation causes a VM-exit. NMI blocking is in effect so it is
- * sufficient to simply vector to the NMI handler via a software interrupt.
- * However, this must be done before maskable interrupts are enabled
- * otherwise the "iret" issued by an interrupt handler will incorrectly
- * clear NMI blocking.
- */
-static __inline void
-vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
-{
-	uint32_t intr_info;
-
-	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
-
-	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
-		return;
-
-	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
-	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
-	    ("VM exit interruption info invalid: %#x", intr_info));
-
-	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
-		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
-		    "to NMI has invalid vector: %#x", intr_info));
-		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
-		__asm __volatile("int $2");
-	}
-}
-
 static __inline void
 vmx_dr_enter_guest(struct vmxctx *vmxctx)
 {
@@ -2964,14 +2983,10 @@
 		/* Update 'nextrip' */
 		vmx->state[vcpu].nextrip = rip;
 
-		if (rc == VMX_GUEST_VMEXIT) {
-			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
-			enable_intr();
+		if (rc == VMX_GUEST_VMEXIT)
 			handled = vmx_exit_process(vmx, vcpu, vmexit);
-		} else {
-			enable_intr();
+		else
 			vmx_exit_inst_error(vmxctx, rc, vmexit);
-		}
 		launched = 1;
 		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
 		rip = vmexit->rip;
Index: sys/amd64/vmm/intel/vmx_support.S
===================================================================
--- sys/amd64/vmm/intel/vmx_support.S
+++ sys/amd64/vmm/intel/vmx_support.S
@@ -315,7 +315,7 @@
  * %rdi = interrupt handler entry point
  *
  * Calling sequence described in the "Instruction Set Reference" for the "INT"
- * instruction in Intel SDM, Vol 2.
+ * instruction in Intel SDM, Vol 2. Interrupts must be disabled.
  */
 ENTRY(vmx_call_isr)
 	VENTER
@@ -325,7 +325,6 @@
 	pushq	%r11		/* %rsp */
 	pushfq			/* %rflags */
 	pushq	$KERNEL_CS	/* %cs */
-	cli			/* disable interrupts */
 	callq	*%rdi		/* push %rip and call isr */
 	VLEAVE
 	ret