diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -192,15 +192,18 @@
     0, "Guests are allowed to use INVPCID");
 
 static int tpr_shadowing;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &tpr_shadowing, 0, "TPR shadowing support");
 
 static int virtual_interrupt_delivery;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
 
 static int posted_interrupts;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &posted_interrupts, 0, "APICv posted interrupt support");
 
 static int pirvec = -1;
@@ -213,10 +216,10 @@
     &vpid_alloc_failed, 0, NULL);
 
 int guest_l1d_flush;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &guest_l1d_flush, 0, NULL);
 int guest_l1d_flush_sw;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &guest_l1d_flush_sw, 0, NULL);
 
 static struct msr_entry msr_load_list[1] __aligned(16);
@@ -832,8 +835,12 @@
 	    &tmp);
 	if (error == 0) {
 		tpr_shadowing = 1;
+#ifndef BURN_BRIDGES
 		TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing",
 		    &tpr_shadowing);
+#endif
+		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.tpr_shadowing",
+		    &tpr_shadowing);
 	}
 
 	if (tpr_shadowing) {
@@ -854,8 +861,12 @@
 	    procbased2_vid_bits, 0, &tmp);
 	if (error == 0 && tpr_shadowing) {
 		virtual_interrupt_delivery = 1;
+#ifndef BURN_BRIDGES
 		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
 		    &virtual_interrupt_delivery);
+#endif
+		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.virtual_interrupt_delivery",
+		    &virtual_interrupt_delivery);
 	}
 
 	if (virtual_interrupt_delivery) {
@@ -881,8 +892,12 @@
 				}
 			} else {
 				posted_interrupts = 1;
+#ifndef BURN_BRIDGES
 				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
 				    &posted_interrupts);
+#endif
+				TUNABLE_INT_FETCH("hw.vmm.vmx.cap.posted_interrupts",
+				    &posted_interrupts);
 			}
 		}
 	}
@@ -899,7 +914,10 @@
 
 	guest_l1d_flush = (cpu_ia32_arch_caps &
 	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
+#ifndef BURN_BRIDGES
 	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
+#endif
+	TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);
 
 	/*
 	 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when
@@ -911,8 +929,12 @@
 	if (guest_l1d_flush) {
 		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
 			guest_l1d_flush_sw = 1;
+#ifndef BURN_BRIDGES
 			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
 			    &guest_l1d_flush_sw);
+#endif
+			TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw",
+			    &guest_l1d_flush_sw);
 		}
 		if (guest_l1d_flush_sw) {
 			if (nmi_flush_l1d_sw <= 1)
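
Note on the flag change: CTLFLAG_RD only exposes a read-only sysctl, while
CTLFLAG_RDTUN additionally registers it as a loader tunable. CTLFLAG_NOFETCH
suppresses the automatic fetch of that tunable at sysctl registration time,
which matters here because the defaults are only known after run-time
capability probing, so each value is fetched manually with TUNABLE_INT_FETCH()
once its default has been computed. The old hw.vmm.* tunable names are kept as
a fallback behind #ifndef BURN_BRIDGES so existing loader.conf entries keep
working for a transition period. Below is a minimal standalone sketch of that
pattern; the hw.example.* names, the detection stub, and the SYSINIT hook are
hypothetical and not part of this change.

/*
 * Sketch: a tunable-backed sysctl whose default depends on run-time
 * feature detection, so the tunable is fetched manually.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int example_feature;

SYSCTL_NODE(_hw, OID_AUTO, example, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "example tunables");
/*
 * CTLFLAG_NOFETCH: do not read the "hw.example.feature" tunable when
 * this sysctl is registered; example_init() fetches it explicitly
 * after the hardware-derived default is in place.
 */
SYSCTL_INT(_hw_example, OID_AUTO, feature,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &example_feature, 0, "example feature support");

static void
example_init(void *arg __unused)
{
	/* Hypothetical default from capability detection. */
	example_feature = 1;

	/*
	 * Honor the legacy tunable name first, then let the new name
	 * (matching the sysctl path) override it, mirroring the
	 * BURN_BRIDGES transition scheme in the diff above.
	 */
	TUNABLE_INT_FETCH("hw.example.use_feature", &example_feature);
	TUNABLE_INT_FETCH("hw.example.feature", &example_feature);
}
SYSINIT(example_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, example_init, NULL);

Because both fetches target the same variable and the new name is fetched
last, a loader.conf that sets only the legacy name still takes effect, while
one that sets the new name wins if both are present.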