Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/vmm/amd/svm.c
Show First 20 Lines • Show All 76 Lines • ▼ Show 20 Lines | |||||
/* CPUID Fn8000_000A_EDX feature bits advertising optional SVM capabilities. */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */

/*
 * Bitmap for all exceptions excluding unimplemented vectors 2 and 9.
 * (All 32 bits set except bit 2 and bit 9: ~(BIT(2) | BIT(9)).)
 */
#define	ALL_EXCEPTIONS_BITMAP		0xFFFFFDFB
#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ | #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ | ||||
VMCB_CACHE_IOPM | \ | VMCB_CACHE_IOPM | \ | ||||
VMCB_CACHE_I | \ | VMCB_CACHE_I | \ | ||||
VMCB_CACHE_TPR | \ | VMCB_CACHE_TPR | \ | ||||
VMCB_CACHE_CR2 | \ | VMCB_CACHE_CR2 | \ | ||||
VMCB_CACHE_CR | \ | VMCB_CACHE_CR | \ | ||||
VMCB_CACHE_DT | \ | VMCB_CACHE_DT | \ | ||||
VMCB_CACHE_SEG | \ | VMCB_CACHE_SEG | \ | ||||
Show All 28 Lines | |||||
/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/* Per-vcpu statistics exported through the vmm stats interface. */
static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
static VMM_STAT_AMD(VMEXIT_EXCEPTION_DURING_IRET, "VM exits due to exceptions "
    "during iret");
static VMM_STAT_AMD(NMI_SPECULATIVE_UNBLOCKING, "Number of times vNMI "
    "unblocked speculatively");
static VMM_STAT_AMD(NMI_PRECISE_UNBLOCKING, "Number of times vNMI "
    "unblocked precisely");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
/*
 * Return non-zero if the CPU supports flushing the guest TLB by ASID,
 * as advertised by the AMD_CPUID_SVM_FLUSH_BY_ASID feature bit.
 */
static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}
▲ Show 20 Lines • Show All 259 Lines • ▼ Show 20 Lines | |||||
/*
 * Convenience wrapper: set (enable) the intercept bits in 'bitmask' for
 * the intercept word at index 'off' in the vcpu's VMCB.
 */
static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
static void | static void | ||||
set_exception_bitmap(struct svm_softc *sc, int vcpu, uint32_t newval) | |||||
{ | |||||
struct vmcb_ctrl *ctrl; | |||||
uint32_t oldval; | |||||
ctrl = svm_get_vmcb_ctrl(sc, vcpu); | |||||
oldval = ctrl->intercept[VMCB_EXC_INTCPT]; | |||||
if (newval != oldval) { | |||||
ctrl->intercept[VMCB_EXC_INTCPT] = newval; | |||||
svm_set_dirty(sc, vcpu, VMCB_CACHE_I); | |||||
VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " | |||||
"from %#x to %#x", VMCB_EXC_INTCPT, oldval, newval); | |||||
} | |||||
} | |||||
static void | |||||
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, | vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, | ||||
uint64_t msrpm_base_pa, uint64_t np_pml4) | uint64_t msrpm_base_pa, uint64_t np_pml4) | ||||
{ | { | ||||
struct vmcb_ctrl *ctrl; | struct vmcb_ctrl *ctrl; | ||||
struct vmcb_state *state; | struct vmcb_state *state; | ||||
uint32_t mask; | uint32_t mask; | ||||
int n; | int n; | ||||
Show All 19 Lines | else | ||||
svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); | svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); | ||||
} | } | ||||
/* | /* | ||||
* Intercept everything when tracing guest exceptions otherwise | * Intercept everything when tracing guest exceptions otherwise | ||||
* just intercept machine check exception. | * just intercept machine check exception. | ||||
*/ | */ | ||||
if (vcpu_trace_exceptions(sc->vm, vcpu)) { | if (vcpu_trace_exceptions(sc->vm, vcpu)) | ||||
for (n = 0; n < 32; n++) { | mask = ALL_EXCEPTIONS_BITMAP; | ||||
/* | else | ||||
* Skip unimplemented vectors in the exception bitmap. | mask = BIT(IDT_MC); | ||||
*/ | set_exception_bitmap(sc, vcpu, mask); | ||||
if (n == 2 || n == 9) { | |||||
continue; | |||||
} | |||||
svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); | |||||
} | |||||
} else { | |||||
svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); | |||||
} | |||||
/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ | /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); | ||||
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); | svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); | ||||
▲ Show 20 Lines • Show All 562 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct vmcb_ctrl *ctrl; | struct vmcb_ctrl *ctrl; | ||||
ctrl = svm_get_vmcb_ctrl(sc, vcpu); | ctrl = svm_get_vmcb_ctrl(sc, vcpu); | ||||
*val = ctrl->intr_shadow; | *val = ctrl->intr_shadow; | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | |||||
* Once an NMI is injected it blocks delivery of further NMIs until the handler | |||||
* executes an IRET. The IRET intercept is enabled when an NMI is injected to | |||||
* to track when the vcpu is done handling the NMI. | |||||
*/ | |||||
static int | |||||
nmi_blocked(struct svm_softc *sc, int vcpu) | |||||
{ | |||||
int blocked; | |||||
blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, | |||||
VMCB_INTCPT_IRET); | |||||
return (blocked); | |||||
} | |||||
/*
 * Begin vNMI blocking: called right after an NMI is injected into the
 * guest. Enables the IRET intercept so we get a #VMEXIT when the guest's
 * NMI handler starts to execute "iret", and records the blocking state
 * on the vcpu.
 */
static void
nmi_enable_iret_intercept(struct svm_softc *sc, int vcpu)
{
	struct svm_vcpu *vcpustate;

	vcpustate = svm_get_vcpu(sc, vcpu);

	/* The caller must not already be in any vNMI blocking state. */
	KASSERT(!vcpustate->nmi.blocking, ("invalid vNMI blocking state %d",
	    vcpustate->nmi.blocking));

	vcpustate->nmi.blocking = NMI_IRET_INTERCEPT;
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
	VCPU_CTR0(sc->vm, vcpu, "vNMI iret intercept enabled");
}
/*
 * Transition from the NMI_IRET_INTERCEPT state to NMI_IRET_TRACING:
 * called on the #VMEXIT taken when the guest starts executing "iret".
 *
 * The IRET intercept is disabled and the "iret" is single-stepped (via
 * PSL_T) so we can tell precisely when it has completed; until then the
 * vcpu remains vNMI-blocked. PSL_RF is set so the restarted "iret" does
 * not re-trigger any instruction breakpoint.
 */
static void
nmi_enable_iret_tracing(struct svm_softc *sc, int vcpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vmcb *vmcb;
	int error;

	vcpustate = svm_get_vcpu(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vmcb = svm_get_vmcb(sc, vcpu);
	state = &vmcb->state;

	/* Only legal from the IRET-intercept phase of vNMI blocking. */
	KASSERT(vcpustate->nmi.blocking == NMI_IRET_INTERCEPT,
	    ("invalid vNMI blocking state %d", vcpustate->nmi.blocking));

	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));

	/*
	 * XXX
	 * Single stepping using the trap flag does not work across a task
	 * switch so we unblock vNMIs right here. A vNMI can be prematurely
	 * injected into the vcpu if a #VMEXIT is triggered before the "iret"
	 * can finish execution (e.g. #NPF).
	 */
	if (state->rflags & PSL_NT) {
		vcpustate->nmi.blocking = 0;
		vmm_stat_incr(sc->vm, vcpu, NMI_SPECULATIVE_UNBLOCKING, 1);
		VCPU_CTR0(sc->vm, vcpu, "vNMI unblocked speculatively");
		return;
	}

	/*
	 * Single step "iret" which can trigger a #VMEXIT for the following
	 * reasons:
	 *
	 * 1. The "iret" executes successfully in which case the single step
	 *    will trigger a VMEXIT_EXCEPTION (IDT_DB).
	 * 2. The "iret" triggers an exception which in turn will cause a
	 *    VMEXIT_EXCEPTION (IDT_GP, IDT_PF, IDT_SS etc).
	 * 3. An #VMEXIT is triggered by reasons unrelated to the "iret".
	 *    For e.g. nested page fault, hardware interrupt or NMI.
	 *
	 * From section "Handling Multiple NMIs" from the Intel SDM
	 * cases (1) and (2) will unblock vNMIs.
	 */
	vcpustate->nmi.blocking = NMI_IRET_TRACING;
	/* Save %rflags so the guest-visible bits can be restored later. */
	vcpustate->nmi.rflags = state->rflags;
	state->rflags |= PSL_RF | PSL_T;
	/* Intercept all exceptions so case (2) above is observed precisely. */
	vcpustate->nmi.exception_bitmap = ctrl->intercept[VMCB_EXC_INTCPT];
	set_exception_bitmap(sc, vcpu, ALL_EXCEPTIONS_BITMAP);
	VCPU_CTR4(sc->vm, vcpu, "vNMI iret tracing enabled: "
	    "rflags (%#lx/%#lx) exception_bitmap (%#08x/%#08x)",
	    vcpustate->nmi.rflags, state->rflags,
	    vcpustate->nmi.exception_bitmap, ALL_EXCEPTIONS_BITMAP);
}
static void | |||||
nmi_unblock(struct svm_softc *sc, int vcpu, bool restore_rflags) | |||||
{ | |||||
struct svm_vcpu *vcpustate; | |||||
struct vmcb_state *state; | |||||
struct vmcb *vmcb; | |||||
vcpustate = svm_get_vcpu(sc, vcpu); | |||||
vmcb = svm_get_vmcb(sc, vcpu); | |||||
state = &vmcb->state; | |||||
KASSERT(vcpustate->nmi.blocking == NMI_IRET_TRACING, | |||||
("invalid vNMI blocking state %d", vcpustate->nmi.blocking)); | |||||
/* | |||||
* If the "iret" execution triggered an exception then restore the | |||||
* PSL_RF and PSL_T bits in %rflags before injecting the exception | |||||
* into the guest. | |||||
* | |||||
* If the "iret" instruction completes successfully then %rflags has | |||||
* already been restored from the NMI stack. | |||||
*/ | |||||
if (restore_rflags) { | |||||
state->rflags &= ~(PSL_RF | PSL_T); | |||||
state->rflags |= (vcpustate->nmi.rflags & (PSL_RF | PSL_T)); | |||||
} | |||||
set_exception_bitmap(sc, vcpu, vcpustate->nmi.exception_bitmap); | |||||
vcpustate->nmi.blocking = 0; | |||||
vmm_stat_incr(sc->vm, vcpu, NMI_PRECISE_UNBLOCKING, 1); | |||||
VCPU_CTR0(sc->vm, vcpu, "vNMIs unblocked precisely"); | |||||
} | |||||
static int | static int | ||||
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, | emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, | ||||
bool *retu) | bool *retu) | ||||
{ | { | ||||
int error; | int error; | ||||
if (lapic_msr(num)) | if (lapic_msr(num)) | ||||
error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); | error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) | ||||
vme->u.svm.exitinfo2 = info2; | vme->u.svm.exitinfo2 = info2; | ||||
} | } | ||||
static int | static int | ||||
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) | svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) | ||||
{ | { | ||||
struct vmcb *vmcb; | struct vmcb *vmcb; | ||||
struct vmcb_state *state; | struct vmcb_state *state; | ||||
struct svm_vcpu *vcpustate; | |||||
struct vmcb_ctrl *ctrl; | struct vmcb_ctrl *ctrl; | ||||
struct svm_regctx *ctx; | struct svm_regctx *ctx; | ||||
uint64_t code, info1, info2, val; | uint64_t code, info1, info2, val; | ||||
uint32_t eax, ecx, edx; | uint32_t eax, ecx, edx; | ||||
int error, errcode_valid, handled, idtvec, reflect; | int error, errcode_valid, handled, idtvec, reflect; | ||||
bool retu; | bool retu; | ||||
ctx = svm_get_guest_regctx(svm_sc, vcpu); | ctx = svm_get_guest_regctx(svm_sc, vcpu); | ||||
vcpustate = svm_get_vcpu(svm_sc, vcpu); | |||||
vmcb = svm_get_vmcb(svm_sc, vcpu); | vmcb = svm_get_vmcb(svm_sc, vcpu); | ||||
state = &vmcb->state; | state = &vmcb->state; | ||||
ctrl = &vmcb->ctrl; | ctrl = &vmcb->ctrl; | ||||
handled = 0; | handled = 0; | ||||
code = ctrl->exitcode; | code = ctrl->exitcode; | ||||
info1 = ctrl->exitinfo1; | info1 = ctrl->exitinfo1; | ||||
info2 = ctrl->exitinfo2; | info2 = ctrl->exitinfo2; | ||||
Show All 25 Lines | svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) | ||||
svm_save_intinfo(svm_sc, vcpu); | svm_save_intinfo(svm_sc, vcpu); | ||||
switch (code) { | switch (code) { | ||||
case VMCB_EXIT_IRET: | case VMCB_EXIT_IRET: | ||||
/* | /* | ||||
* Restart execution at "iret" but with the intercept cleared. | * Restart execution at "iret" but with the intercept cleared. | ||||
*/ | */ | ||||
vmexit->inst_length = 0; | vmexit->inst_length = 0; | ||||
clear_nmi_blocking(svm_sc, vcpu); | nmi_enable_iret_tracing(svm_sc, vcpu); | ||||
handled = 1; | handled = 1; | ||||
break; | break; | ||||
case VMCB_EXIT_VINTR: /* interrupt window exiting */ | case VMCB_EXIT_VINTR: /* interrupt window exiting */ | ||||
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); | vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); | ||||
handled = 1; | handled = 1; | ||||
break; | break; | ||||
case VMCB_EXIT_INTR: /* external interrupt */ | case VMCB_EXIT_INTR: /* external interrupt */ | ||||
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); | vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); | ||||
handled = 1; | handled = 1; | ||||
break; | break; | ||||
case VMCB_EXIT_NMI: /* external NMI */ | case VMCB_EXIT_NMI: /* external NMI */ | ||||
handled = 1; | handled = 1; | ||||
break; | break; | ||||
case 0x40 ... 0x5F: | case 0x40 ... 0x5F: | ||||
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); | vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); | ||||
reflect = 1; | reflect = 1; | ||||
idtvec = code - 0x40; | idtvec = code - 0x40; | ||||
if (vcpustate->nmi.blocking == NMI_IRET_TRACING) { | |||||
if (idtvec == IDT_DB) { | |||||
/* Don't reflect #DB into the guest */ | |||||
reflect = 0; | |||||
/* | |||||
* APMv2 Section 15.2.2 #DB (Debug): | |||||
* The value saved for DR6 and DR7 matches | |||||
* what would be visible to a #DB handler. | |||||
*/ | |||||
KASSERT((state->dr6 & (1 << 14)) != 0, | |||||
("DR6.BS not set (%#lx)", state->dr6)); | |||||
} else { | |||||
vmm_stat_incr(svm_sc->vm, vcpu, | |||||
VMEXIT_EXCEPTION_DURING_IRET, 1); | |||||
} | |||||
nmi_unblock(svm_sc, vcpu, idtvec == IDT_DB ? 0 : 1); | |||||
} | |||||
switch (idtvec) { | switch (idtvec) { | ||||
case IDT_MC: | case IDT_MC: | ||||
/* | /* | ||||
* Call the machine check handler by hand. Also don't | * Call the machine check handler by hand. Also don't | ||||
* reflect the machine check back into the guest. | * reflect the machine check back into the guest. | ||||
*/ | */ | ||||
reflect = 0; | reflect = 0; | ||||
VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); | VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); | ||||
▲ Show 20 Lines • Show All 222 Lines • ▼ Show 20 Lines | svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) | ||||
* | * | ||||
* An event might also be pending because an exception was injected | * An event might also be pending because an exception was injected | ||||
* by the hypervisor (e.g. #PF during instruction emulation). | * by the hypervisor (e.g. #PF during instruction emulation). | ||||
*/ | */ | ||||
svm_inj_intinfo(sc, vcpu); | svm_inj_intinfo(sc, vcpu); | ||||
/* NMI event has priority over interrupts. */ | /* NMI event has priority over interrupts. */ | ||||
if (vm_nmi_pending(sc->vm, vcpu)) { | if (vm_nmi_pending(sc->vm, vcpu)) { | ||||
if (nmi_blocked(sc, vcpu)) { | if (vcpustate->nmi.blocking) { | ||||
/* | /* | ||||
* Can't inject another NMI if the guest has not | * Can't inject another NMI if the guest has not | ||||
* yet executed an "iret" after the last NMI. | * yet executed an "iret" after the last NMI. | ||||
*/ | */ | ||||
VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " | VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to %s ", | ||||
"to NMI-blocking"); | vcpustate->nmi.blocking == NMI_IRET_INTERCEPT ? | ||||
"iret intercept" : "iret tracing"); | |||||
} else if (ctrl->intr_shadow) { | } else if (ctrl->intr_shadow) { | ||||
/* | /* | ||||
* Can't inject an NMI if the vcpu is in an intr_shadow. | * Can't inject an NMI if the vcpu is in an intr_shadow. | ||||
*/ | */ | ||||
VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " | VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " | ||||
"interrupt shadow"); | "interrupt shadow"); | ||||
need_intr_window = 1; | need_intr_window = 1; | ||||
goto done; | goto done; | ||||
Show All 19 Lines | if (vm_nmi_pending(sc->vm, vcpu)) { | ||||
} else { | } else { | ||||
vm_nmi_clear(sc->vm, vcpu); | vm_nmi_clear(sc->vm, vcpu); | ||||
/* Inject NMI, vector number is not used */ | /* Inject NMI, vector number is not used */ | ||||
svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, | svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, | ||||
IDT_NMI, 0, false); | IDT_NMI, 0, false); | ||||
/* virtual NMI blocking is now in effect */ | /* virtual NMI blocking is now in effect */ | ||||
enable_nmi_blocking(sc, vcpu); | nmi_enable_iret_intercept(sc, vcpu); | ||||
VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); | VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); | ||||
} | } | ||||
} | } | ||||
if (!vm_extint_pending(sc->vm, vcpu)) { | if (!vm_extint_pending(sc->vm, vcpu)) { | ||||
/* | /* | ||||
* APIC interrupts are delivered using the V_IRQ offload. | * APIC interrupts are delivered using the V_IRQ offload. | ||||
▲ Show 20 Lines • Show All 118 Lines • ▼ Show 20 Lines | KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || | ||||
(state->rflags & PSL_I) == 0 || ctrl->intr_shadow, | (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, | ||||
("Bogus intr_window_exiting: eventinj (%#lx), " | ("Bogus intr_window_exiting: eventinj (%#lx), " | ||||
"intr_shadow (%u), rflags (%#lx)", | "intr_shadow (%u), rflags (%#lx)", | ||||
ctrl->eventinj, ctrl->intr_shadow, state->rflags)); | ctrl->eventinj, ctrl->intr_shadow, state->rflags)); | ||||
enable_intr_window_exiting(sc, vcpu); | enable_intr_window_exiting(sc, vcpu); | ||||
} else { | } else { | ||||
disable_intr_window_exiting(sc, vcpu); | disable_intr_window_exiting(sc, vcpu); | ||||
} | } | ||||
#ifdef INVARIANTS | |||||
if (vcpustate->nmi.blocking == NMI_IRET_TRACING) { | |||||
KASSERT((state->rflags & (PSL_RF | PSL_T)) == (PSL_RF | PSL_T), | |||||
("invalid rflags value during iret tracing (%#lx)", | |||||
state->rflags)); | |||||
KASSERT(ctrl->intr_shadow, ("vcpu must be in interrupt " | |||||
"shadow during iret tracing")); | |||||
KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, | |||||
("event injection not expected during iret tracing")); | |||||
} | |||||
#endif | |||||
} | } | ||||
static __inline void | static __inline void | ||||
restore_host_tss(void) | restore_host_tss(void) | ||||
{ | { | ||||
struct system_segment_descriptor *tss_sd; | struct system_segment_descriptor *tss_sd; | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 490 Lines • Show Last 20 Lines |