sys/amd64/vmm/intel/vmx.c
[521 lines not shown]

@@ vpid_free(int vpid) @@
          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
          * the unit number allocator.
          */
         if (vpid > VM_MAXCPU)
                 free_unr(vpid_unr, vpid);
 }
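For context: vpid_free() refuses to hand values at or below VM_MAXCPU back to the allocator because the unit number allocator is only ever seeded with VPIDs above that range; VPID 0 and the per-vcpu fallback range are reserved. A minimal sketch of the expected setup, assuming the allocator's upper bound is the architectural 16-bit VPID limit (the real bounds live in vpid_init(), which is only partially visible below):

static struct unrhdr *vpid_unr;         /* sketch: mirrors the file-scope allocator */

static void
vpid_init_sketch(void)
{
        /*
         * Sketch only: VPID 0 means "no tagging", [1, VM_MAXCPU] is the
         * shared fallback range, and only (VM_MAXCPU, 0xffff] is handed out
         * by the unit number allocator, so nothing in [0, VM_MAXCPU] may be
         * passed to free_unr().
         */
        vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}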
-static void
-vpid_alloc(uint16_t *vpid, int num)
+static uint16_t
+vpid_alloc(int vcpuid)
 {
-        int i, x;
-
-        if (num <= 0 || num > VM_MAXCPU)
-                panic("invalid number of vpids requested: %d", num);
+        int x;
 
         /*
          * If the "enable vpid" execution control is not enabled then the
          * VPID is required to be 0 for all vcpus.
          */
-        if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
-                for (i = 0; i < num; i++)
-                        vpid[i] = 0;
-                return;
-        }
+        if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0)
+                return (0);
 
         /*
-         * Allocate a unique VPID for each vcpu from the unit number allocator.
+         * Try to allocate a unique VPID for each vcpu from the unit number
+         * allocator.
          */
-        for (i = 0; i < num; i++) {
-                x = alloc_unr(vpid_unr);
-                if (x == -1)
-                        break;
-                else
-                        vpid[i] = x;
-        }
+        x = alloc_unr(vpid_unr);
 
-        if (i < num) {
+        if (x == -1) {
                 atomic_add_int(&vpid_alloc_failed, 1);
 
                 /*
                  * If the unit number allocator does not have enough unique
                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
                  *
                  * These VPIDs are not unique across VMs but this does not
                  * affect correctness because the combined mappings are also
                  * tagged with the EP4TA which is unique for each VM.
                  *
                  * It is still sub-optimal because the invvpid will invalidate
                  * combined mappings for a particular VPID across all EP4TAs.
                  */
-                while (i-- > 0)
-                        vpid_free(vpid[i]);
-                for (i = 0; i < num; i++)
-                        vpid[i] = i + 1;
+                return (vcpuid + 1);
         }
+        return (x);
 }
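With this rewrite vpid_alloc() cannot fail: it returns 0 when VPID tagging is disabled, a unique VPID when the unit number allocator still has one, and otherwise degrades to the shared per-vcpu fallback. A small illustrative restatement of that contract (the helper name is hypothetical and only mirrors the function above; the bounds assume vpid_init() seeds the allocator above VM_MAXCPU):

static void
vpid_alloc_contract_check(int vcpuid)
{
        uint16_t vpid = vpid_alloc(vcpuid);

        if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0)
                KASSERT(vpid == 0, ("VPID tagging disabled, expected 0"));
        else if (vpid > VM_MAXCPU)
                ;       /* unique VPID handed out by the unit number allocator */
        else
                KASSERT(vpid == vcpuid + 1,
                    ("allocator exhausted, expected per-vcpu fallback"));
}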
 static void
 vpid_init(void)
 {
         /*
          * VPID 0 is required when the "enable VPID" execution control is
          * disabled.
          *

[442 lines not shown]

 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
 static void *
 vmx_init(struct vm *vm, pmap_t pmap)
 {
         int error;
         struct vmx *vmx;
-        uint16_t maxcpus = vm_get_maxcpus(vm);
 
         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
         vmx->vm = vm;
         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop));
 
         /*
          * Clean up EPTP-tagged guest physical and combined mappings

[44 lines not shown]

@@ if (guest_msr_rw(vmx, MSR_GSBASE) || @@
             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
             guest_msr_rw(vmx, MSR_EFER) ||
             guest_msr_ro(vmx, MSR_TSC) ||
             ((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX)))
                 panic("vmx_init: error setting guest msr access");
 
-        vpid_alloc(vmx->vpids, maxcpus);
-
         if (virtual_interrupt_delivery) {
                 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
                     APIC_ACCESS_ADDRESS);
                 /* XXX this should really return an error to the caller */
                 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
         }
 
         vmx->pmap = pmap;
         return (vmx);
 }
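A knock-on effect of dropping the vpid_alloc() call above: vmx_init() no longer needs the maxcpus lookup or the VM-wide vpids[] array that the old code indexes further down as vmx->vpids[vcpuid]. The companion header change presumably removes that field from struct vmx, roughly along these lines (illustrative only; the struct layout is not part of this excerpt):

 struct vmx {
         /* ... */
-        uint16_t        vpids[VM_MAXCPU];       /* superseded by per-vcpu vpid_alloc() */
         /* ... */
 };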
 static void *
 vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
 {
         struct vmx *vmx = vmi;
         struct vmcs *vmcs;
         struct vmx_vcpu *vcpu;
         uint32_t exc_bitmap;
+        uint16_t vpid;
         int error;
 
+        vpid = vpid_alloc(vcpuid);
+
         vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO);
         vcpu->vmx = vmx;
         vcpu->vcpu = vcpu1;
         vcpu->vcpuid = vcpuid;
         vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX,
             M_WAITOK | M_ZERO);
         vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX,
             M_WAITOK | M_ZERO);

[22 lines not shown]

@@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) @@
         if (vcpu_trap_wbinvd(vcpu->vcpu)) {
                 KASSERT(cap_wbinvd_exit, ("WBINVD trap not available"));
                 procbased_ctls2 |= PROCBASED2_WBINVD_EXITING;
         }
 
         error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
         error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
         error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
         error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
-        error += vmwrite(VMCS_VPID, vmx->vpids[vcpuid]);
+        error += vmwrite(VMCS_VPID, vpid);
 
         if (guest_l1d_flush && !guest_l1d_flush_sw) {
                 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
                     (vm_offset_t)&msr_load_list[0]));
                 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
                     nitems(msr_load_list));
                 vmcs_write(VMCS_EXIT_MSR_STORE, 0);
                 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);

[31 lines not shown]

@@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) @@
         vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0;
         vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0;
         vcpu->cap.proc_ctls = procbased_ctls;
         vcpu->cap.proc_ctls2 = procbased_ctls2;
         vcpu->cap.exc_bitmap = exc_bitmap;
 
         vcpu->state.nextrip = ~0;
         vcpu->state.lastcpu = NOCPU;
-        vcpu->state.vpid = vmx->vpids[vcpuid];
+        vcpu->state.vpid = vpid;
 
         /*
          * Set up the CR0/4 shadows, and init the read shadow
          * to the power-on register value from the Intel Sys Arch.
          *  CR0 - 0x60000010
          *  CR4 - 0
          */
         error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
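Not visible in this excerpt is the release side: with each vcpu now owning its VPID, the vcpu teardown path has to give it back. A minimal sketch of what that presumably looks like (the function name is assumed; vpid_free() above already ignores the shared fallback range, so the call can be unconditional):

static void
vmx_vcpu_cleanup_sketch(struct vmx_vcpu *vcpu)
{
        /*
         * Sketch only: fallback VPIDs in [1, VM_MAXCPU] are shared across
         * VMs and never returned to the unit number allocator; vpid_free()
         * filters them out, so the per-vcpu VPID is freed unconditionally.
         */
        vpid_free(vcpu->state.vpid);
}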
[3,061 lines not shown]