diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -565,8 +565,6 @@
 	uint16_t maxcpus;
 
 	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
-	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
-		panic("malloc of svm_softc not aligned on page boundary");
 
 	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
 	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
@@ -619,9 +617,11 @@
 	maxcpus = vm_get_maxcpus(svm_sc->vm);
 	for (i = 0; i < maxcpus; i++) {
 		vcpu = svm_get_vcpu(svm_sc, i);
+		vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE,
+		    M_SVM, M_WAITOK | M_ZERO);
 		vcpu->nextrip = ~0;
 		vcpu->lastcpu = NOCPU;
-		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
+		vcpu->vmcb_pa = vtophys(vcpu->vmcb);
 		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
 		svm_msr_guest_init(svm_sc, i);
 	}
@@ -2149,7 +2149,14 @@
 svm_cleanup(void *arg)
 {
 	struct svm_softc *sc = arg;
+	struct svm_vcpu *vcpu;
+	uint16_t i, maxcpus;
 
+	maxcpus = vm_get_maxcpus(sc->vm);
+	for (i = 0; i < maxcpus; i++) {
+		vcpu = svm_get_vcpu(sc, i);
+		free(vcpu->vmcb, M_SVM);
+	}
 	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
 	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
 	free(sc, M_SVM);
@@ -2400,7 +2407,8 @@
 	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
 	vlapic->vm = svm_sc->vm;
 	vlapic->vcpuid = vcpuid;
-	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
+	vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
+	    M_WAITOK | M_ZERO);
 
 	vlapic_init(vlapic);
@@ -2412,6 +2420,7 @@
 {
 
 	vlapic_cleanup(vlapic);
+	free(vlapic->apic_page, M_SVM_VLAPIC);
 	free(vlapic, M_SVM_VLAPIC);
 }
 
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -124,7 +124,7 @@
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_rdmtrr(&sc->mtrr[vcpu], num, result) != 0) {
+		if (vm_rdmtrr(&sc->vcpu[vcpu].mtrr, num, result) != 0) {
 			vm_inject_gp(sc->vm, vcpu);
 		}
 		break;
@@ -156,7 +156,7 @@
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_wrmtrr(&sc->mtrr[vcpu], num, val) != 0) {
+		if (vm_wrmtrr(&sc->vcpu[vcpu].mtrr, num, val) != 0) {
 			vm_inject_gp(sc->vm, vcpu);
 		}
 		break;
diff --git a/sys/amd64/vmm/amd/svm_softc.h b/sys/amd64/vmm/amd/svm_softc.h
--- a/sys/amd64/vmm/amd/svm_softc.h
+++ b/sys/amd64/vmm/amd/svm_softc.h
@@ -41,12 +41,8 @@
 	uint32_t num;		/* range is [1, nasid - 1] */
 };
 
-/*
- * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
- * due to VMCB alignment requirements.
- */
 struct svm_vcpu {
-	struct vmcb	vmcb;	 /* hardware saved vcpu context */
+	struct vmcb	*vmcb;	 /* hardware saved vcpu context */
 	struct svm_regctx swctx; /* software saved vcpu context */
 	uint64_t	vmcb_pa; /* VMCB physical address */
 	uint64_t	nextrip; /* next instruction to be executed by guest */
@@ -54,23 +50,20 @@
 	uint32_t	dirty;	/* state cache bits that must be cleared */
 	long		eptgen;	/* pmap->pm_eptgen when the vcpu last ran */
 	struct asid	asid;
-} __aligned(PAGE_SIZE);
+	struct vm_mtrr	mtrr;
+};
 
 /*
  * SVM softc, one per virtual machine.
  */
 struct svm_softc {
-	uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
 	struct svm_vcpu vcpu[VM_MAXCPU];
 	vm_offset_t 	nptp;	    /* nested page table */
 	uint8_t		*iopm_bitmap;    /* shared by all vcpus */
 	uint8_t		*msr_bitmap;   /* shared by all vcpus */
 	struct vm	*vm;
-	struct vm_mtrr mtrr[VM_MAXCPU];
 };
 
-CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
-
 static __inline struct svm_vcpu *
 svm_get_vcpu(struct svm_softc *sc, int vcpu)
 {
@@ -82,21 +75,21 @@
 svm_get_vmcb(struct svm_softc *sc, int vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].vmcb));
+	return ((sc->vcpu[vcpu].vmcb));
 }
 
 static __inline struct vmcb_state *
 svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].vmcb.state));
+	return (&(sc->vcpu[vcpu].vmcb->state));
 }
 
 static __inline struct vmcb_ctrl *
 svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].vmcb.ctrl));
+	return (&(sc->vcpu[vcpu].vmcb->ctrl));
 }
 
 static __inline struct svm_regctx *