Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -172,6 +172,10 @@
 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
     0, "Guests are allowed to use INVPCID");
 
+static int tpr_shadowing;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD,
+    &tpr_shadowing, 0, "TPR shadowing support");
+
 static int virtual_interrupt_delivery;
 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
@@ -627,7 +631,7 @@
 static int
 vmx_init(int ipinum)
 {
-	int error, use_tpr_shadow;
+	int error;
 	uint64_t basic, fixed0, fixed1, feature_control;
 	uint32_t tmp, procbased2_vid_bits;
 
@@ -750,6 +754,24 @@
 	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
 	    &tmp) == 0);
 
+	/*
+	 * Check support for TPR shadow.
+	 */
+	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
+	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
+	    &tmp);
+	if (error == 0) {
+		tpr_shadowing = 1;
+		TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing",
+		    &tpr_shadowing);
+	}
+
+	if (tpr_shadowing) {
+		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
+		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
+		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
+	}
+
 	/*
 	 * Check support for virtual interrupt delivery.
 	 */
@@ -758,13 +780,9 @@
 	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
 	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
 
-	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
-	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
-	    &tmp) == 0);
-
 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
 	    MSR_VMX_PROCBASED_CTLS2, procbased2_vid_bits, 0, &tmp);
-	if (error == 0 && use_tpr_shadow) {
+	if (error == 0 && tpr_shadowing) {
 		virtual_interrupt_delivery = 1;
 		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
 		    &virtual_interrupt_delivery);
@@ -775,13 +793,6 @@
 		procbased_ctls2 |= procbased2_vid_bits;
 		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
 
-		/*
-		 * No need to emulate accesses to %CR8 if virtual
-		 * interrupt delivery is enabled.
-		 */
-		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
-		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
-
 		/*
 		 * Check for Posted Interrupts only if Virtual Interrupt
 		 * Delivery is enabled.
@@ -1051,10 +1062,13 @@
 	vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
 	error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
 
-	if (virtual_interrupt_delivery) {
-		error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
+	if (tpr_shadowing) {
 		error += vmwrite(VMCS_VIRTUAL_APIC,
 		    vtophys(&vmx->apic_page[i]));
+	}
+
+	if (virtual_interrupt_delivery) {
+		error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
 		error += vmwrite(VMCS_EOI_EXIT0, 0);
 		error += vmwrite(VMCS_EOI_EXIT1, 0);
 		error += vmwrite(VMCS_EOI_EXIT2, 0);
@@ -2313,6 +2327,14 @@
 		}
 	}
 
+	/*
+	 * If TPR Shadowing is enabled, update the local APIC's PPR.
+	 */
+	if (tpr_shadowing && !virtual_interrupt_delivery) {
+		vlapic = vm_lapic(vmx->vm, vcpu);
+		vlapic_update_ppr(vlapic);
+	}
+
 	switch (reason) {
 	case EXIT_REASON_TASK_SWITCH:
 		ts = &vmexit->u.task_switch;
@@ -2658,6 +2680,10 @@
 		SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case EXIT_REASON_TPR:
+		vmexit->inst_length = 0;
+		handled = HANDLED;
+		break;
 	case EXIT_REASON_VMCALL:
 	case EXIT_REASON_VMCLEAR:
 	case EXIT_REASON_VMLAUNCH:
@@ -2943,6 +2969,14 @@
 			break;
 		}
 
+		/*
+		 * If TPR Shadowing is enabled, the TPR Threshold
+		 * must be updated right before entering the guest.
+		 */
+		if (tpr_shadowing && !virtual_interrupt_delivery) {
+			vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
+		}
+
 		/*
 		 * VM exits restore the base address but not the
 		 * limits of GDTR and IDTR. The VMCS only stores the
Index: sys/amd64/vmm/io/vlapic.h
===================================================================
--- sys/amd64/vmm/io/vlapic.h
+++ sys/amd64/vmm/io/vlapic.h
@@ -74,6 +74,8 @@
 void vlapic_fire_cmci(struct vlapic *vlapic);
 int vlapic_trigger_lvt(struct vlapic *vlapic, int vector);
 
+void vlapic_update_ppr(struct vlapic *vlapic);
+
 uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
 int vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
 void vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state s);
Index: sys/amd64/vmm/io/vlapic.c
===================================================================
--- sys/amd64/vmm/io/vlapic.c
+++ sys/amd64/vmm/io/vlapic.c
@@ -490,7 +490,7 @@
 * Algorithm adopted from section "Interrupt, Task and Processor Priority"
 * in Intel Architecture Manual Vol 3a.
 */
-static void
+void
 vlapic_update_ppr(struct vlapic *vlapic)
 {
	int isrvec, tpr, ppr;
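
Note for reviewers (not part of the patch): the VMCS_TPR_THRESHOLD write
above leans on the "TPR below threshold" rule from the Intel SDM, Vol. 3
(TPR virtualization). With "use TPR shadow" set and virtual-interrupt
delivery clear, the CPU raises a VM exit (EXIT_REASON_TPR, handled above
with inst_length = 0 because this exit is trap-like: the guest instruction
has already completed) whenever bits 3:0 of the TPR threshold exceed bits
7:4 of the virtual-APIC page's TPR. Because CR8 is architecturally
TPR[7:4], loading vlapic_get_cr8() into the threshold arms an exit for the
next time the guest lowers its task priority, which is exactly when a
previously masked interrupt may become deliverable. A minimal standalone
sketch of that comparison, with made-up example values:

/*
 * tpr_demo.c -- illustration only; mirrors the SDM comparison that
 * triggers EXIT_REASON_TPR, not code from the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
tpr_below_threshold(uint8_t vtpr, uint32_t tpr_threshold)
{
	/* A VM exit occurs iff TPR_THRESHOLD[3:0] > VTPR[7:4]. */
	return ((tpr_threshold & 0xf) > ((vtpr >> 4) & 0xf));
}

int
main(void)
{
	/* Threshold armed at 4, i.e. the guest's TPR class was 4. */
	printf("VTPR 0x30 -> exit=%d\n", tpr_below_threshold(0x30, 4)); /* 1 */
	printf("VTPR 0x50 -> exit=%d\n", tpr_below_threshold(0x50, 4)); /* 0 */
	return (0);
}

The change is also opt-out at boot: vmx_init() fetches the
hw.vmm.vmx.use_tpr_shadowing loader tunable, and since virtual interrupt
delivery is now gated on "error == 0 && tpr_shadowing", setting that
tunable to 0 disables APICv interrupt delivery as well. The read-only
sysctl hw.vmm.vmx_cap.tpr_shadowing reports the negotiated state.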