Index: sys/amd64/include/vmm.h
===================================================================
--- sys/amd64/include/vmm.h
+++ sys/amd64/include/vmm.h
@@ -188,6 +188,7 @@
 const char *vm_name(struct vm *vm);
 void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
     uint16_t *threads, uint16_t *maxcpus);
+uint16_t vm_get_maxcpus(struct vm *vm);
 int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
     uint16_t threads, uint16_t maxcpus);
Index: sys/amd64/vmm/amd/svm.c
===================================================================
--- sys/amd64/vmm/amd/svm.c
+++ sys/amd64/vmm/amd/svm.c
@@ -577,7 +577,7 @@
 	iopm_pa = vtophys(svm_sc->iopm_bitmap);
 	msrpm_pa = vtophys(svm_sc->msr_bitmap);
 	pml4_pa = svm_sc->nptp;
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm_get_maxcpus(svm_sc->vm); i++) {
 		vcpu = svm_get_vcpu(svm_sc, i);
 		vcpu->nextrip = ~0;
 		vcpu->lastcpu = NOCPU;
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -1003,7 +1003,7 @@
 		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
 	}
 
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm_get_maxcpus(vm); i++) {
 		vmcs = &vmx->vmcs[i];
 		vmcs->identifier = vmx_revision();
 		error = vmclear(vmcs);
@@ -3005,7 +3005,7 @@
 	if (apic_access_virtualization(vmx, 0))
 		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < vm_get_maxcpus(vmx->vm); i++)
 		vpid_free(vmx->state[i].vpid);
 
 	free(vmx, M_VMX);
Index: sys/amd64/vmm/io/vlapic.c
===================================================================
--- sys/amd64/vmm/io/vlapic.c
+++ sys/amd64/vmm/io/vlapic.c
@@ -838,7 +838,7 @@
 		 */
 		CPU_ZERO(dmask);
 		vcpuid = vm_apicid2vcpuid(vm, dest);
-		if (vcpuid < VM_MAXCPU)
+		if (vcpuid < vm_get_maxcpus(vm))
 			CPU_SET(vcpuid, dmask);
 	} else {
 		/*
@@ -1030,7 +1030,7 @@
 	if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
 		return (0);
 
-	if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+	if (vlapic->vcpuid == 0 && dest != 0 && dest < vm_get_maxcpus(vlapic->vm)) {
 		vlapic2 = vm_lapic(vlapic->vm, dest);
 
 		/* move from INIT to waiting-for-SIPI state */
@@ -1043,7 +1043,7 @@
 	}
 
 	if (mode == APIC_DELMODE_STARTUP) {
-		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < vm_get_maxcpus(vlapic->vm)) {
 			vlapic2 = vm_lapic(vlapic->vm, dest);
 
 			/*
@@ -1447,7 +1447,7 @@
 vlapic_init(struct vlapic *vlapic)
 {
 	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
-	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < VM_MAXCPU,
+	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < vm_get_maxcpus(vlapic->vm),
 	    ("vlapic_init: vcpuid is not initialized"));
 	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
 	    "initialized"));
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -276,7 +276,7 @@
 {
 	struct vcpu *vcpu;
 
-	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
+	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
 	    ("vcpu_init: invalid vcpu %d", vcpu_id));
 
 	vcpu = &vm->vcpu[vcpu_id];
@@ -315,7 +315,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (cpuid < 0 || cpuid >= VM_MAXCPU)
+	if (cpuid < 0 || cpuid >= vm->maxcpus)
 		panic("vm_exitinfo: invalid cpuid %d", cpuid);
 
 	vcpu = &vm->vcpu[cpuid];
@@ -428,7 +428,7 @@
 	vm->suspend = 0;
 	CPU_ZERO(&vm->suspended_cpus);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < vm->maxcpus; i++)
 		vcpu_init(vm, i, create);
 }
@@ -467,6 +467,7 @@
 	vm->cores = cores_per_package;	/* XXX backwards compatibility */
 	vm->threads = threads_per_core;	/* XXX backwards compatibility */
 	vm->maxcpus = 0;		/* XXX not implemented */
+	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
 
 	vm_init(vm, true);
@@ -484,19 +485,26 @@
 	*maxcpus = vm->maxcpus;
 }
 
+uint16_t
+vm_get_maxcpus(struct vm *vm)
+{
+	return (vm->maxcpus);
+}
+
 int
 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
     uint16_t threads, uint16_t maxcpus)
 {
 	if (maxcpus != 0)
 		return (EINVAL);	/* XXX remove when supported */
-	if ((sockets * cores * threads) > VM_MAXCPU)
+	if ((sockets * cores * threads) > vm->maxcpus)
 		return (EINVAL);
 	/* XXX need to check sockets * cores * threads == vCPU, how? */
 	vm->sockets = sockets;
 	vm->cores = cores;
 	vm->threads = threads;
 	vm->maxcpus = maxcpus;
+	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
 	return(0);
 }
@@ -521,7 +529,7 @@
 	vatpic_cleanup(vm->vatpic);
 	vioapic_cleanup(vm->vioapic);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < vm->maxcpus; i++)
 		vcpu_cleanup(vm, i, destroy);
 
 	VMCLEANUP(vm->cookie);
@@ -956,9 +964,9 @@
 	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
 	 */
 	int state;
-	KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
+	KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
 	    __func__, vcpuid));
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (vcpuid != -1 && vcpuid != i)
 			continue;
 		state = vcpu_get_state(vm, i, NULL);
@@ -1004,7 +1012,7 @@
 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -1019,7 +1027,7 @@
 	struct vcpu *vcpu;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -1073,7 +1081,7 @@
     struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1086,7 +1094,7 @@
 vm_set_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1258,7 +1266,7 @@
 vm_handle_rendezvous(struct vm *vm, int vcpuid)
 {
 
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
 	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
 
 	mtx_lock(&vm->rendezvous_mtx);
@@ -1537,7 +1545,7 @@
 	/*
 	 * Wakeup the other sleeping vcpus and return to userspace.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->suspended_cpus)) {
 			vcpu_notify_event(vm, i, false);
 		}
@@ -1579,7 +1587,7 @@
 	/*
 	 * Notify all active vcpus that they are now suspended.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->active_cpus))
 			vcpu_notify_event(vm, i, false);
 	}
@@ -1665,7 +1673,7 @@
 
 	vcpuid = vmrun->cpuid;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -1766,7 +1774,7 @@
 	int error;
 
 	vm = arg;
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1805,7 +1813,7 @@
 	struct vcpu *vcpu;
 	int type, vector;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1946,7 +1954,7 @@
 	uint64_t info1, info2;
 	int valid;
 
-	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
+	KASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1986,7 +1994,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2003,7 +2011,7 @@
 	uint64_t regval;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (vector < 0 || vector >= 32)
@@ -2094,7 +2102,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2109,7 +2117,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2122,7 +2130,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2141,7 +2149,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2156,7 +2164,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2169,7 +2177,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2184,7 +2192,7 @@
 int
 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2196,7 +2204,7 @@
 int
 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm->maxcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2281,7 +2289,7 @@
 	int error;
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2299,7 +2307,7 @@
 	struct vcpu *vcpu;
 	enum vcpu_state state;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2317,7 +2325,7 @@
 vm_activate_cpu(struct vm *vm, int vcpuid)
 {
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -2333,12 +2341,12 @@
 {
 	int i;
 
-	if (vcpuid < -1 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (vcpuid == -1) {
 		vm->debug_cpus = vm->active_cpus;
-		for (i = 0; i < VM_MAXCPU; i++) {
+		for (i = 0; i < vm->maxcpus; i++) {
 			if (CPU_ISSET(i, &vm->active_cpus))
 				vcpu_notify_event(vm, i, false);
 		}
@@ -2356,7 +2364,7 @@
 vm_resume_cpu(struct vm *vm, int vcpuid)
 {
 
-	if (vcpuid < -1 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (vcpuid == -1) {
@@ -2408,7 +2416,7 @@
 int
 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	*state = vm->vcpu[vcpuid].x2apic_state;
@@ -2419,7 +2427,7 @@
 int
 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
 		return (EINVAL);
 
 	if (state >= X2APIC_STATE_LAST)
@@ -2506,7 +2514,7 @@
 	 * Enforce that this function is called without any locks
 	 */
 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
 
 restart:
@@ -2536,7 +2544,7 @@
 	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
 	 * vcpus so they handle the rendezvous as soon as possible.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &dest))
 			vcpu_notify_event(vm, i, false);
 	}
Index: sys/amd64/vmm/vmm_dev.c
===================================================================
--- sys/amd64/vmm/vmm_dev.c
+++ sys/amd64/vmm/vmm_dev.c
@@ -111,7 +111,7 @@
 {
 	int error;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm))
 		return (EINVAL);
 
 	error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
@@ -137,7 +137,7 @@
 {
 	int error, vcpu;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
+	for (vcpu = 0; vcpu < vm_get_maxcpus(sc->vm); vcpu++) {
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			break;
@@ -156,7 +156,7 @@
 {
 	int vcpu;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
+	for (vcpu = 0; vcpu < vm_get_maxcpus(sc->vm); vcpu++)
 		vcpu_unlock_one(sc, vcpu);
 }
@@ -203,7 +203,7 @@
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(sc, vm_get_maxcpus(sc->vm) - 1);
 	if (error)
 		return (error);
 
@@ -222,7 +222,7 @@
 		 * Since this device does not support lseek(2), dd(1) will
 		 * read(2) blocks of data to simulate the lseek(2).
 		 */
-		hpa = vm_gpa_hold(sc->vm, VM_MAXCPU - 1, gpa, c, prot, &cookie);
+		hpa = vm_gpa_hold(sc->vm, vm_get_maxcpus(sc->vm) - 1, gpa, c, prot, &cookie);
 		if (hpa == NULL) {
 			if (uio->uio_rw == UIO_READ && gpa < maxaddr)
 				error = uiomove(__DECONST(void *, zero_region),
@@ -234,7 +234,7 @@
 			vm_gpa_release(cookie);
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, vm_get_maxcpus(sc->vm) - 1);
 	return (error);
 }
@@ -437,7 +437,7 @@
 		 * Lock a vcpu to make sure that the memory map cannot be
 		 * modified while it is being inspected.
 		 */
-		vcpu = VM_MAXCPU - 1;
+		vcpu = vm_get_maxcpus(sc->vm) - 1;
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			goto done;
@@ -817,7 +817,7 @@
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(sc, vm_get_maxcpus(sc->vm) - 1);
 	if (error)
 		return (error);
 
@@ -846,7 +846,7 @@
 			error = EINVAL;
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, vm_get_maxcpus(sc->vm) - 1);
 	return (error);
 }
@@ -1058,7 +1058,7 @@
 	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
 		return (EINVAL);
 
-	error = vcpu_lock_one(dsc->sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(dsc->sc, vm_get_maxcpus(dsc->sc->vm) - 1);
 	if (error)
 		return (error);
 
@@ -1066,7 +1066,7 @@
 	KASSERT(error == 0 && !sysmem && *objp != NULL,
 	    ("%s: invalid devmem segment %d", __func__, dsc->segid));
 
-	vcpu_unlock_one(dsc->sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(dsc->sc, vm_get_maxcpus(dsc->sc->vm) - 1);
 
 	if (seglen >= last) {
 		vm_object_reference(*objp);
Index: sys/amd64/vmm/vmm_lapic.c
===================================================================
--- sys/amd64/vmm/vmm_lapic.c
+++ sys/amd64/vmm/vmm_lapic.c
@@ -56,7 +56,7 @@
 {
 	struct vlapic *vlapic;
 
-	if (cpu < 0 || cpu >= VM_MAXCPU)
+	if (cpu < 0 || cpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	/*
@@ -79,7 +79,7 @@
 	cpuset_t dmask;
 	int error;
 
-	if (cpu < -1 || cpu >= VM_MAXCPU)
+	if (cpu < -1 || cpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	if (cpu == -1)
Index: sys/amd64/vmm/vmm_stat.c
===================================================================
--- sys/amd64/vmm/vmm_stat.c
+++ sys/amd64/vmm/vmm_stat.c
@@ -88,7 +88,7 @@
 	uint64_t *stats;
 	int i;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= vm_get_maxcpus(vm))
 		return (EINVAL);
 
 	/* Let stats functions update their counters */
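
Note (not part of the patch): a minimal sketch of how a consumer is expected to use
the new accessor. example_count_active_vcpus() is a hypothetical function invented
for illustration; only struct vm and vm_get_maxcpus() come from the change above.
The point is that loops and range checks bind to the per-VM maxcpus value at run
time instead of the compile-time VM_MAXCPU constant this diff retires.

#include <sys/types.h>
#include <sys/cpuset.h>

#include <machine/vmm.h>	/* struct vm, vm_get_maxcpus() */

/*
 * Hypothetical consumer, for illustration only.  Counts the vCPUs set in
 * 'active', bounding the loop with the per-VM limit returned by
 * vm_get_maxcpus() rather than with VM_MAXCPU.
 */
static int
example_count_active_vcpus(struct vm *vm, cpuset_t *active)
{
	uint16_t i, maxcpus;
	int count;

	count = 0;
	maxcpus = vm_get_maxcpus(vm);	/* per-VM bound, not VM_MAXCPU */
	for (i = 0; i < maxcpus; i++) {
		if (CPU_ISSET(i, active))
			count++;
	}
	return (count);
}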