Changeset View
Changeset View
Standalone View
Standalone View
head/sys/amd64/vmm/intel/vmx.c
Show First 20 Lines • Show All 169 Lines • ▼ Show 20 Lines | |||||
/* Monitor trap flag capability, exported read-only via sysctl. */
static int cap_monitor_trap;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
    &cap_monitor_trap, 0, "Monitor trap flag");
/* Non-zero when guests are allowed to execute INVPCID directly. */
static int cap_invpcid;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
    0, "Guests are allowed to use INVPCID");
/* Non-zero when the processor supports TPR shadowing. */
static int tpr_shadowing;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD,
    &tpr_shadowing, 0, "TPR shadowing support");
/* Non-zero when APICv virtual interrupt delivery is enabled. */
static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
/* Non-zero when APICv posted interrupts are enabled. */
static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");
▲ Show 20 Lines • Show All 439 Lines • ▼ Show 20 Lines | vmx_restore(void) | ||||
if (vmxon_enabled[curcpu]) | if (vmxon_enabled[curcpu]) | ||||
vmxon(vmxon_region[curcpu]); | vmxon(vmxon_region[curcpu]); | ||||
} | } | ||||
static int | static int | ||||
vmx_init(int ipinum) | vmx_init(int ipinum) | ||||
{ | { | ||||
int error, use_tpr_shadow; | int error; | ||||
uint64_t basic, fixed0, fixed1, feature_control; | uint64_t basic, fixed0, fixed1, feature_control; | ||||
uint32_t tmp, procbased2_vid_bits; | uint32_t tmp, procbased2_vid_bits; | ||||
/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ | /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ | ||||
if (!(cpu_feature2 & CPUID2_VMX)) { | if (!(cpu_feature2 & CPUID2_VMX)) { | ||||
printf("vmx_init: processor does not support VMX operation\n"); | printf("vmx_init: processor does not support VMX operation\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 107 Lines • ▼ Show 20 Lines | cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, | ||||
PROCBASED2_UNRESTRICTED_GUEST, 0, | PROCBASED2_UNRESTRICTED_GUEST, 0, | ||||
&tmp) == 0); | &tmp) == 0); | ||||
cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, | cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, | ||||
MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, | MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, | ||||
&tmp) == 0); | &tmp) == 0); | ||||
/* | /* | ||||
* Check support for TPR shadow. | |||||
*/ | |||||
error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, | |||||
MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, | |||||
&tmp); | |||||
if (error == 0) { | |||||
tpr_shadowing = 1; | |||||
TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing", | |||||
&tpr_shadowing); | |||||
} | |||||
if (tpr_shadowing) { | |||||
procbased_ctls |= PROCBASED_USE_TPR_SHADOW; | |||||
procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; | |||||
procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; | |||||
} | |||||
/* | |||||
* Check support for virtual interrupt delivery. | * Check support for virtual interrupt delivery. | ||||
*/ | */ | ||||
procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | | procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | | ||||
PROCBASED2_VIRTUALIZE_X2APIC_MODE | | PROCBASED2_VIRTUALIZE_X2APIC_MODE | | ||||
PROCBASED2_APIC_REGISTER_VIRTUALIZATION | | PROCBASED2_APIC_REGISTER_VIRTUALIZATION | | ||||
PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); | PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); | ||||
use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, | |||||
MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, | |||||
&tmp) == 0); | |||||
error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, | error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, | ||||
procbased2_vid_bits, 0, &tmp); | procbased2_vid_bits, 0, &tmp); | ||||
if (error == 0 && use_tpr_shadow) { | if (error == 0 && tpr_shadowing) { | ||||
virtual_interrupt_delivery = 1; | virtual_interrupt_delivery = 1; | ||||
TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", | TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", | ||||
&virtual_interrupt_delivery); | &virtual_interrupt_delivery); | ||||
} | } | ||||
if (virtual_interrupt_delivery) { | if (virtual_interrupt_delivery) { | ||||
procbased_ctls |= PROCBASED_USE_TPR_SHADOW; | procbased_ctls |= PROCBASED_USE_TPR_SHADOW; | ||||
procbased_ctls2 |= procbased2_vid_bits; | procbased_ctls2 |= procbased2_vid_bits; | ||||
procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; | procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; | ||||
/* | /* | ||||
* No need to emulate accesses to %CR8 if virtual | |||||
* interrupt delivery is enabled. | |||||
*/ | |||||
procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; | |||||
procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; | |||||
/* | |||||
* Check for Posted Interrupts only if Virtual Interrupt | * Check for Posted Interrupts only if Virtual Interrupt | ||||
* Delivery is enabled. | * Delivery is enabled. | ||||
*/ | */ | ||||
error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, | error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, | ||||
MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, | MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, | ||||
&tmp); | &tmp); | ||||
if (error == 0) { | if (error == 0) { | ||||
pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : | pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : | ||||
▲ Show 20 Lines • Show All 252 Lines • ▼ Show 20 Lines | if (vcpu_trace_exceptions(vm, i)) | ||||
exc_bitmap = 0xffffffff; | exc_bitmap = 0xffffffff; | ||||
else | else | ||||
exc_bitmap = 1 << IDT_MC; | exc_bitmap = 1 << IDT_MC; | ||||
error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); | error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); | ||||
vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; | vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; | ||||
error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); | error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); | ||||
if (virtual_interrupt_delivery) { | if (tpr_shadowing) { | ||||
error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); | |||||
error += vmwrite(VMCS_VIRTUAL_APIC, | error += vmwrite(VMCS_VIRTUAL_APIC, | ||||
vtophys(&vmx->apic_page[i])); | vtophys(&vmx->apic_page[i])); | ||||
} | |||||
if (virtual_interrupt_delivery) { | |||||
error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); | |||||
error += vmwrite(VMCS_EOI_EXIT0, 0); | error += vmwrite(VMCS_EOI_EXIT0, 0); | ||||
error += vmwrite(VMCS_EOI_EXIT1, 0); | error += vmwrite(VMCS_EOI_EXIT1, 0); | ||||
error += vmwrite(VMCS_EOI_EXIT2, 0); | error += vmwrite(VMCS_EOI_EXIT2, 0); | ||||
error += vmwrite(VMCS_EOI_EXIT3, 0); | error += vmwrite(VMCS_EOI_EXIT3, 0); | ||||
} | } | ||||
if (posted_interrupts) { | if (posted_interrupts) { | ||||
error += vmwrite(VMCS_PIR_VECTOR, pirvec); | error += vmwrite(VMCS_PIR_VECTOR, pirvec); | ||||
error += vmwrite(VMCS_PIR_DESC, | error += vmwrite(VMCS_PIR_DESC, | ||||
▲ Show 20 Lines • Show All 1,587 Lines • ▼ Show 20 Lines | vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) | ||||
case EXIT_REASON_MONITOR: | case EXIT_REASON_MONITOR: | ||||
SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); | SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); | ||||
vmexit->exitcode = VM_EXITCODE_MONITOR; | vmexit->exitcode = VM_EXITCODE_MONITOR; | ||||
break; | break; | ||||
case EXIT_REASON_MWAIT: | case EXIT_REASON_MWAIT: | ||||
SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); | SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); | ||||
vmexit->exitcode = VM_EXITCODE_MWAIT; | vmexit->exitcode = VM_EXITCODE_MWAIT; | ||||
break; | break; | ||||
case EXIT_REASON_TPR: | |||||
vlapic = vm_lapic(vmx->vm, vcpu); | |||||
vlapic_sync_tpr(vlapic); | |||||
vmexit->inst_length = 0; | |||||
handled = HANDLED; | |||||
break; | |||||
case EXIT_REASON_VMCALL: | case EXIT_REASON_VMCALL: | ||||
case EXIT_REASON_VMCLEAR: | case EXIT_REASON_VMCLEAR: | ||||
case EXIT_REASON_VMLAUNCH: | case EXIT_REASON_VMLAUNCH: | ||||
case EXIT_REASON_VMPTRLD: | case EXIT_REASON_VMPTRLD: | ||||
case EXIT_REASON_VMPTRST: | case EXIT_REASON_VMPTRST: | ||||
case EXIT_REASON_VMREAD: | case EXIT_REASON_VMREAD: | ||||
case EXIT_REASON_VMRESUME: | case EXIT_REASON_VMRESUME: | ||||
case EXIT_REASON_VMWRITE: | case EXIT_REASON_VMWRITE: | ||||
▲ Show 20 Lines • Show All 270 Lines • ▼ Show 20 Lines | do { | ||||
if (vcpu_debugged(vm, vcpu)) { | if (vcpu_debugged(vm, vcpu)) { | ||||
enable_intr(); | enable_intr(); | ||||
vm_exit_debug(vmx->vm, vcpu, rip); | vm_exit_debug(vmx->vm, vcpu, rip); | ||||
break; | break; | ||||
} | } | ||||
/* | /* | ||||
* If TPR Shadowing is enabled, the TPR Threshold | |||||
* must be updated right before entering the guest. | |||||
*/ | |||||
if (tpr_shadowing && !virtual_interrupt_delivery) { | |||||
if ((vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { | |||||
vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); | |||||
} | |||||
} | |||||
/* | |||||
* VM exits restore the base address but not the | * VM exits restore the base address but not the | ||||
* limits of GDTR and IDTR. The VMCS only stores the | * limits of GDTR and IDTR. The VMCS only stores the | ||||
* base address, so VM exits set the limits to 0xffff. | * base address, so VM exits set the limits to 0xffff. | ||||
* Save and restore the full GDTR and IDTR to restore | * Save and restore the full GDTR and IDTR to restore | ||||
* the limits. | * the limits. | ||||
* | * | ||||
* The VMCS does not save the LDTR at all, and VM | * The VMCS does not save the LDTR at all, and VM | ||||
* exits clear LDTR as if a NULL selector were loaded. | * exits clear LDTR as if a NULL selector were loaded. | ||||
▲ Show 20 Lines • Show All 671 Lines • ▼ Show 20 Lines | if (level) | ||||
val |= mask; | val |= mask; | ||||
else | else | ||||
val &= ~mask; | val &= ~mask; | ||||
vmcs_write(VMCS_EOI_EXIT(vector), val); | vmcs_write(VMCS_EOI_EXIT(vector), val); | ||||
VMCLEAR(vmcs); | VMCLEAR(vmcs); | ||||
} | } | ||||
static void | static void | ||||
vmx_enable_x2apic_mode(struct vlapic *vlapic) | vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) | ||||
{ | { | ||||
struct vmx *vmx; | struct vmx *vmx; | ||||
struct vmcs *vmcs; | struct vmcs *vmcs; | ||||
uint32_t proc_ctls; | |||||
int vcpuid; | |||||
vcpuid = vlapic->vcpuid; | |||||
vmx = ((struct vlapic_vtx *)vlapic)->vmx; | |||||
vmcs = &vmx->vmcs[vcpuid]; | |||||
proc_ctls = vmx->cap[vcpuid].proc_ctls; | |||||
proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; | |||||
proc_ctls |= PROCBASED_CR8_LOAD_EXITING; | |||||
proc_ctls |= PROCBASED_CR8_STORE_EXITING; | |||||
vmx->cap[vcpuid].proc_ctls = proc_ctls; | |||||
VMPTRLD(vmcs); | |||||
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); | |||||
VMCLEAR(vmcs); | |||||
} | |||||
static void | |||||
vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) | |||||
{ | |||||
struct vmx *vmx; | |||||
struct vmcs *vmcs; | |||||
uint32_t proc_ctls2; | uint32_t proc_ctls2; | ||||
int vcpuid, error; | int vcpuid, error; | ||||
vcpuid = vlapic->vcpuid; | vcpuid = vlapic->vcpuid; | ||||
vmx = ((struct vlapic_vtx *)vlapic)->vmx; | vmx = ((struct vlapic_vtx *)vlapic)->vmx; | ||||
vmcs = &vmx->vmcs[vcpuid]; | vmcs = &vmx->vmcs[vcpuid]; | ||||
proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; | proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; | ||||
▲ Show 20 Lines • Show All 141 Lines • ▼ Show 20 Lines | vmx_vlapic_init(void *arg, int vcpuid) | ||||
vlapic->vm = vmx->vm; | vlapic->vm = vmx->vm; | ||||
vlapic->vcpuid = vcpuid; | vlapic->vcpuid = vcpuid; | ||||
vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; | vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; | ||||
vlapic_vtx = (struct vlapic_vtx *)vlapic; | vlapic_vtx = (struct vlapic_vtx *)vlapic; | ||||
vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; | vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; | ||||
vlapic_vtx->vmx = vmx; | vlapic_vtx->vmx = vmx; | ||||
if (tpr_shadowing) { | |||||
vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; | |||||
} | |||||
if (virtual_interrupt_delivery) { | if (virtual_interrupt_delivery) { | ||||
vlapic->ops.set_intr_ready = vmx_set_intr_ready; | vlapic->ops.set_intr_ready = vmx_set_intr_ready; | ||||
vlapic->ops.pending_intr = vmx_pending_intr; | vlapic->ops.pending_intr = vmx_pending_intr; | ||||
vlapic->ops.intr_accepted = vmx_intr_accepted; | vlapic->ops.intr_accepted = vmx_intr_accepted; | ||||
vlapic->ops.set_tmr = vmx_set_tmr; | vlapic->ops.set_tmr = vmx_set_tmr; | ||||
vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; | vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; | ||||
} | } | ||||
if (posted_interrupts) | if (posted_interrupts) | ||||
vlapic->ops.post_intr = vmx_post_intr; | vlapic->ops.post_intr = vmx_post_intr; | ||||
vlapic_init(vlapic); | vlapic_init(vlapic); | ||||
return (vlapic); | return (vlapic); | ||||
Show All 28 Lines |