Index: sys/amd64/vmm/amd/svm.c
===================================================================
--- sys/amd64/vmm/amd/svm.c
+++ sys/amd64/vmm/amd/svm.c
@@ -98,6 +98,9 @@
 static MALLOC_DEFINE(M_SVM, "svm", "svm");
 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
 
+/* Number of vCPUs */
+extern u_int maxvcpus;
+
 /* Per-CPU context area. */
 extern struct pcpu __pcpu[];
 
@@ -558,7 +561,7 @@
 	iopm_pa = vtophys(svm_sc->iopm_bitmap);
 	msrpm_pa = vtophys(svm_sc->msr_bitmap);
 	pml4_pa = svm_sc->nptp;
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		vcpu = svm_get_vcpu(svm_sc, i);
 		vcpu->nextrip = ~0;
 		vcpu->lastcpu = NOCPU;
Index: sys/amd64/vmm/amd/svm_softc.h
===================================================================
--- sys/amd64/vmm/amd/svm_softc.h
+++ sys/amd64/vmm/amd/svm_softc.h
@@ -58,6 +58,7 @@
 struct svm_softc {
 	uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];	/* shared by all vcpus */
 	uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE];	/* shared by all vcpus */
+	/* FIXME: make this maxvcpus */
 	uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
 	struct svm_vcpu vcpu[VM_MAXCPU];
 	vm_offset_t nptp;				/* nested page table */
Index: sys/amd64/vmm/intel/vmcs.c
===================================================================
--- sys/amd64/vmm/intel/vmcs.c
+++ sys/amd64/vmm/intel/vmcs.c
@@ -50,6 +50,8 @@
 #include <ddb/ddb.h>
 #endif
 
+extern u_int maxvcpus;
+
 static uint64_t
 vmcs_fix_regval(uint32_t encoding, uint64_t val)
 {
Index: sys/amd64/vmm/intel/vmx.h
===================================================================
--- sys/amd64/vmm/intel/vmx.h
+++ sys/amd64/vmm/intel/vmx.h
@@ -109,6 +109,7 @@
 
 /* virtual machine softc */
 struct vmx {
+	/* FIXME: make this use maxvcpus */
 	struct vmcs	vmcs[VM_MAXCPU];	/* one vmcs per virtual cpu */
 	struct apic_page apic_page[VM_MAXCPU];	/* one apic page per vcpu */
 	char		msr_bitmap[PAGE_SIZE];
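Note on the svm_softc.h and vmx.h FIXMEs above: the per-vCPU arrays are still
sized by VM_MAXCPU at compile time, so lowering maxvcpus only shrinks the loop
bounds, not the allocations. A minimal sketch of one way to finish the job,
assuming a hypothetical svm_softc_alloc() helper (names and layout are
illustrative, not part of this patch):

	/*
	 * Hypothetical dynamic layout: the fixed [VM_MAXCPU] arrays become
	 * pointers sized by maxvcpus at VM creation time.
	 */
	struct svm_softc {
		uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];
		uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE];
		vm_offset_t nptp;
		uint8_t (*apic_page)[PAGE_SIZE];	/* maxvcpus pages */
		struct svm_vcpu *vcpu;			/* maxvcpus entries */
	};

	static struct svm_softc *
	svm_softc_alloc(void)
	{
		struct svm_softc *sc;

		sc = malloc(sizeof(*sc), M_SVM, M_WAITOK | M_ZERO);
		/*
		 * malloc(9) returns page-aligned memory for allocations of
		 * PAGE_SIZE or more, which the APIC backing pages rely on.
		 */
		sc->apic_page = malloc(maxvcpus * PAGE_SIZE, M_SVM,
		    M_WAITOK | M_ZERO);
		sc->vcpu = malloc(maxvcpus * sizeof(struct svm_vcpu), M_SVM,
		    M_WAITOK | M_ZERO);
		return (sc);
	}

The same shape would apply to struct vmx below, with the extra constraint that
each struct vmcs must itself stay page-aligned.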
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -113,6 +113,8 @@
 #define	HANDLED		1
 #define	UNHANDLED	0
 
+extern u_int maxvcpus;
+
 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
 
@@ -384,11 +386,11 @@
 		panic("vpid_free: invalid vpid %d", vpid);
 
 	/*
-	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
+	 * VPIDs [0,maxvcpus] are special and are not allocated from
 	 * the unit number allocator.
 	 */
 
-	if (vpid > VM_MAXCPU)
+	if (vpid > maxvcpus)
 		free_unr(vpid_unr, vpid);
 }
 
@@ -397,7 +399,7 @@
 {
 	int i, x;
 
-	if (num <= 0 || num > VM_MAXCPU)
+	if (num <= 0 || num > maxvcpus)
 		panic("invalid number of vpids requested: %d", num);
 
 	/*
@@ -426,7 +428,7 @@
 	/*
 	 * If the unit number allocator does not have enough unique
-	 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
+	 * VPIDs then we need to allocate from the [1,maxvcpus] range.
 	 *
 	 * These VPIDs are not unique across VMs but this does not
 	 * affect correctness because the combined mappings are also
 	 * qualified by the EP4TA of the EPT.
@@ -450,13 +452,13 @@
 	 * VPID 0 is required when the "enable VPID" execution control is
 	 * disabled.
 	 *
-	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
+	 * VPIDs [1,maxvcpus] are used as the "overflow namespace" when the
 	 * unit number allocator does not have sufficient unique VPIDs to
 	 * satisfy the allocation.
 	 *
 	 * The remaining VPIDs are managed by the unit number allocator.
 	 */
-	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
+	vpid_unr = new_unrhdr(maxvcpus + 1, 0xffff, NULL);
 }
 
 static void
@@ -814,12 +816,14 @@
 static void *
 vmx_vminit(struct vm *vm, pmap_t pmap)
 {
-	uint16_t vpid[VM_MAXCPU];
+	uint16_t *vpid;
 	int i, error;
 	struct vmx *vmx;
 	struct vmcs *vmcs;
 	uint32_t exc_bitmap;
 
+	vpid = malloc(sizeof(uint16_t) * maxvcpus, M_VMX, M_WAITOK | M_ZERO);
+
 	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
 	if ((uintptr_t)vmx & PAGE_MASK) {
 		panic("malloc of struct vmx not aligned on %d byte boundary",
@@ -871,7 +875,7 @@
 	    guest_msr_ro(vmx, MSR_TSC))
 		panic("vmx_vminit: error setting guest msr access");
 
-	vpid_alloc(vpid, VM_MAXCPU);
+	vpid_alloc(vpid, maxvcpus);
 
 	if (virtual_interrupt_delivery) {
 		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
@@ -880,7 +884,7 @@
 		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
 	}
 
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		vmcs = &vmx->vmcs[i];
 		vmcs->identifier = vmx_revision();
 		error = vmclear(vmcs);
@@ -955,6 +959,8 @@
 		vmx->ctx[i].pmap = pmap;
 	}
 
+	free(vpid, M_VMX);
+
 	return (vmx);
 }
 
@@ -2722,7 +2728,7 @@
 	if (apic_access_virtualization(vmx, 0))
 		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < maxvcpus; i++)
 		vpid_free(vmx->state[i].vpid);
 
 	free(vmx, M_VMX);
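For readers unfamiliar with the VPID scheme the hunks above touch, this is a
condensed sketch of the allocation policy (paraphrasing vpid_alloc(), not
quoting it): the unit number allocator hands out globally unique VPIDs from
[maxvcpus + 1, 0xffff], and only when it runs dry does the code fall back to
the per-VM overflow namespace [1, maxvcpus], which is safe because guest TLB
entries are additionally qualified by the EPT's EP4TA.

	static void
	vpid_alloc_sketch(uint16_t *vpid, int num)
	{
		int i, x;

		/* Prefer globally unique VPIDs from the unit allocator. */
		for (i = 0; i < num; i++) {
			x = alloc_unr(vpid_unr);
			if (x == -1)
				break;
			vpid[i] = x;
		}

		if (i < num) {
			/* Ran dry: return what we got... */
			while (--i >= 0)
				free_unr(vpid_unr, vpid[i]);
			/* ...and use the [1, maxvcpus] overflow namespace. */
			for (i = 0; i < num; i++)
				vpid[i] = i + 1;
		}
	}

Note that vpid_free() relies on the same split: overflow VPIDs
(vpid <= maxvcpus) are never returned to the unit allocator.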
Index: sys/amd64/vmm/io/vlapic.c
===================================================================
--- sys/amd64/vmm/io/vlapic.c
+++ sys/amd64/vmm/io/vlapic.c
@@ -76,6 +76,8 @@
  */
 #define	VLAPIC_BUS_FREQ	(128 * 1024 * 1024)
 
+extern u_int maxvcpus;
+
 static __inline uint32_t
 vlapic_get_id(struct vlapic *vlapic)
 {
@@ -836,7 +838,7 @@
 		 */
 		CPU_ZERO(dmask);
 		vcpuid = vm_apicid2vcpuid(vm, dest);
-		if (vcpuid < VM_MAXCPU)
+		if (vcpuid < maxvcpus)
 			CPU_SET(vcpuid, dmask);
 	} else {
 		/*
@@ -906,6 +908,10 @@
 	}
 }
 
+/*
+ * FIXME: make this use maxvcpus. However, I don't yet see how this can be
+ * made dynamic, since VMM_STAT_ARRAY uses SYSINIT.
+ */
 static VMM_STAT_ARRAY(IPIS_SENT, VM_MAXCPU, "ipis sent to vcpu");
 
 static void
@@ -1028,7 +1034,7 @@
 	if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
 		return (0);
 
-	if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+	if (vlapic->vcpuid == 0 && dest != 0 && dest < maxvcpus) {
 		vlapic2 = vm_lapic(vlapic->vm, dest);
 
 		/* move from INIT to waiting-for-SIPI state */
@@ -1041,7 +1047,7 @@
 	}
 
 	if (mode == APIC_DELMODE_STARTUP) {
-		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
+		if (vlapic->vcpuid == 0 && dest != 0 && dest < maxvcpus) {
 			vlapic2 = vm_lapic(vlapic->vm, dest);
 
 			/*
@@ -1445,7 +1451,7 @@
 vlapic_init(struct vlapic *vlapic)
 {
 	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
-	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < VM_MAXCPU,
+	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < maxvcpus,
 	    ("vlapic_init: vcpuid is not initialized"));
 	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
 	    "initialized"));
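On the VMM_STAT_ARRAY FIXME above: the macro bakes its element count into a
static initializer and then registers the counter via SYSINIT, so a
compile-time constant is required where VM_MAXCPU appears today. One possible
escape hatch, sketched here under the assumption that a sentinel value is
acceptable (none of this is in the patch): keep the static definition but let
the registration hook, which runs after tunables have been processed,
substitute the runtime value.

	/*
	 * Hypothetical convention: nelems == 0 in a VMM_STAT_ARRAY
	 * definition means "size this array to maxvcpus at registration
	 * time". vmm_stat_register() is the existing SYSINIT-driven hook
	 * in vmm_stat.c that assigns vst->index.
	 */
	void
	vmm_stat_register(void *arg)
	{
		struct vmm_stat_type *vst = arg;

		if (vst->nelems == 0)
			vst->nelems = maxvcpus;

		/* ...existing index assignment and bookkeeping... */
	}

This only works if the maxvcpus tunable is final before those SYSINITs run,
which is another argument for making it RDTUN rather than RWTUN.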
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -161,7 +161,8 @@
 	struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
 	struct vmspace *vmspace;		/* (o) guest's address space */
 	char name[VM_MAX_NAMELEN];		/* (o) virtual machine name */
-	struct vcpu vcpu[VM_MAXCPU];		/* (i) guest vcpus */
+	/* FIXME: make this use maxvcpus */
+	struct vcpu vcpu[VM_MAXCPU];		/* (i) guest vcpus */
 };
 
 static int vmm_initialized;
@@ -228,6 +229,8 @@
 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
 
+extern u_int maxvcpus;
+
 #ifdef KTR
 static const char *
 vcpu_state2str(enum vcpu_state state)
@@ -265,7 +268,7 @@
 {
 	struct vcpu *vcpu;
 
-	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
+	KASSERT(vcpu_id >= 0 && vcpu_id < maxvcpus,
 	    ("vcpu_init: invalid vcpu %d", vcpu_id));
 
 	vcpu = &vm->vcpu[vcpu_id];
@@ -304,7 +307,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (cpuid < 0 || cpuid >= VM_MAXCPU)
+	if (cpuid < 0 || cpuid >= maxvcpus)
 		panic("vm_exitinfo: invalid cpuid %d", cpuid);
 
 	vcpu = &vm->vcpu[cpuid];
@@ -415,7 +418,7 @@
 	vm->suspend = 0;
 	CPU_ZERO(&vm->suspended_cpus);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < maxvcpus; i++)
 		vcpu_init(vm, i, create);
 }
 
@@ -471,7 +474,7 @@
 	vatpic_cleanup(vm->vatpic);
 	vioapic_cleanup(vm->vioapic);
 
-	for (i = 0; i < VM_MAXCPU; i++)
+	for (i = 0; i < maxvcpus; i++)
 		vcpu_cleanup(vm, i, destroy);
 
 	VMCLEANUP(vm->cookie);
@@ -906,9 +909,9 @@
 	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
 	 */
 	int state;
-	KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
+	KASSERT(vcpuid >= -1 && vcpuid < maxvcpus, ("%s: invalid vcpuid %d",
 	    __func__, vcpuid));
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		if (vcpuid != -1 && vcpuid != i)
 			continue;
 		state = vcpu_get_state(vm, i, NULL);
@@ -954,7 +957,7 @@
 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -969,7 +972,7 @@
 	struct vcpu *vcpu;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	if (reg >= VM_REG_LAST)
@@ -1023,7 +1026,7 @@
     struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1036,7 +1039,7 @@
 vm_set_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
@@ -1208,7 +1211,7 @@
 vm_handle_rendezvous(struct vm *vm, int vcpuid)
 {
 
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < maxvcpus),
 	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));
 
 	mtx_lock(&vm->rendezvous_mtx);
@@ -1484,7 +1487,7 @@
 	/*
 	 * Wakeup the other sleeping vcpus and return to userspace.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		if (CPU_ISSET(i, &vm->suspended_cpus)) {
 			vcpu_notify_event(vm, i, false);
 		}
@@ -1526,7 +1529,7 @@
 	/*
 	 * Notify all active vcpus that they are now suspended.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		if (CPU_ISSET(i, &vm->active_cpus))
 			vcpu_notify_event(vm, i, false);
 	}
@@ -1601,7 +1604,7 @@
 
 	vcpuid = vmrun->cpuid;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -1701,7 +1704,7 @@
 	int error;
 
 	vm = arg;
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1740,7 +1743,7 @@
 	struct vcpu *vcpu;
 	int type, vector;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1881,7 +1884,7 @@
 	uint64_t info1, info2;
 	int valid;
 
-	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
+	KASSERT(vcpuid >= 0 && vcpuid < maxvcpus, ("invalid vcpu %d", vcpuid));
 
 	vcpu = &vm->vcpu[vcpuid];
 
@@ -1921,7 +1924,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -1938,7 +1941,7 @@
 	uint64_t regval;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	if (vector < 0 || vector >= 32)
@@ -2029,7 +2032,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2044,7 +2047,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2057,7 +2060,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2076,7 +2079,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2091,7 +2094,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2104,7 +2107,7 @@
 {
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2119,7 +2122,7 @@
 int
 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2131,7 +2134,7 @@
 int
 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
 {
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	if (type < 0 || type >= VM_CAP_MAX)
@@ -2216,7 +2219,7 @@
 	int error;
 	struct vcpu *vcpu;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2234,7 +2237,7 @@
 	struct vcpu *vcpu;
 	enum vcpu_state state;
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
 
 	vcpu = &vm->vcpu[vcpuid];
@@ -2252,7 +2255,7 @@
 vm_activate_cpu(struct vm *vm, int vcpuid)
 {
 
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	if (CPU_ISSET(vcpuid, &vm->active_cpus))
@@ -2287,7 +2290,7 @@
 int
 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	*state = vm->vcpu[vcpuid].x2apic_state;
@@ -2298,7 +2301,7 @@
 int
 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
 {
-	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
+	if (vcpuid < 0 || vcpuid >= maxvcpus)
 		return (EINVAL);
 
 	if (state >= X2APIC_STATE_LAST)
@@ -2385,7 +2388,7 @@
 	 * Enforce that this function is called without any locks
 	 */
 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
-	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
+	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < maxvcpus),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
 
 restart:
@@ -2415,7 +2418,7 @@
 	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
 	 * vcpus so they handle the rendezvous as soon as possible.
 	 */
-	for (i = 0; i < VM_MAXCPU; i++) {
+	for (i = 0; i < maxvcpus; i++) {
 		if (CPU_ISSET(i, &dest))
 			vcpu_notify_event(vm, i, false);
 	}
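A side observation on vmm.c: the "vcpuid < 0 || vcpuid >= maxvcpus" range
check now appears in roughly thirty entry points. A hypothetical helper like
the one below (not part of this patch) would keep the comparison against the
unsigned tunable in one place:

	static __inline bool
	vcpuid_valid(int vcpuid)
	{

		return (vcpuid >= 0 && vcpuid < (int)maxvcpus);
	}

The existing checks are still correct as written, because the "vcpuid < 0"
test short-circuits before the signed vcpuid is promoted to unsigned in the
comparison with maxvcpus.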
Index: sys/amd64/vmm/vmm_dev.c
===================================================================
--- sys/amd64/vmm/vmm_dev.c
+++ sys/amd64/vmm/vmm_dev.c
@@ -89,12 +89,14 @@
 static int devmem_create_cdev(const char *vmname, int id, char *devmem);
 static void devmem_destroy(void *arg);
 
+extern u_int maxvcpus;
+
 static int
 vcpu_lock_one(struct vmmdev_softc *sc, int vcpu)
 {
 	int error;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
@@ -120,7 +122,7 @@
 {
 	int error, vcpu;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
+	for (vcpu = 0; vcpu < maxvcpus; vcpu++) {
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			break;
@@ -139,7 +141,7 @@
 {
 	int vcpu;
 
-	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
+	for (vcpu = 0; vcpu < maxvcpus; vcpu++)
 		vcpu_unlock_one(sc, vcpu);
 }
 
@@ -182,7 +184,7 @@
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(sc, maxvcpus - 1);
 	if (error)
 		return (error);
 
@@ -200,7 +202,7 @@
 		 * Since this device does not support lseek(2), dd(1) will
 		 * read(2) blocks of data to simulate the lseek(2).
 		 */
-		hpa = vm_gpa_hold(sc->vm, VM_MAXCPU - 1, gpa, c, prot, &cookie);
+		hpa = vm_gpa_hold(sc->vm, maxvcpus - 1, gpa, c, prot, &cookie);
 		if (hpa == NULL) {
 			if (uio->uio_rw == UIO_READ)
 				error = uiomove(__DECONST(void *, zero_region),
@@ -212,7 +214,7 @@
 			vm_gpa_release(cookie);
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, maxvcpus - 1);
 	return (error);
 }
 
@@ -375,7 +377,7 @@
 		 * Lock a vcpu to make sure that the memory map cannot be
 		 * modified while it is being inspected.
 		 */
-		vcpu = VM_MAXCPU - 1;
+		vcpu = maxvcpus - 1;
 		error = vcpu_lock_one(sc, vcpu);
 		if (error)
 			goto done;
@@ -681,7 +683,7 @@
 	/*
 	 * Get a read lock on the guest memory map by freezing any vcpu.
 	 */
-	error = vcpu_lock_one(sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(sc, maxvcpus - 1);
 	if (error)
 		return (error);
 
@@ -710,7 +712,7 @@
 			error = EINVAL;
 		}
 	}
-	vcpu_unlock_one(sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(sc, maxvcpus - 1);
 	return (error);
 }
 
@@ -910,7 +912,7 @@
 	if ((nprot & PROT_EXEC) || first < 0 || first >= last)
 		return (EINVAL);
 
-	error = vcpu_lock_one(dsc->sc, VM_MAXCPU - 1);
+	error = vcpu_lock_one(dsc->sc, maxvcpus - 1);
 	if (error)
 		return (error);
 
@@ -918,7 +920,7 @@
 	KASSERT(error == 0 && !sysmem && *objp != NULL,
 	    ("%s: invalid devmem segment %d", __func__, dsc->segid));
 
-	vcpu_unlock_one(dsc->sc, VM_MAXCPU - 1);
+	vcpu_unlock_one(dsc->sc, maxvcpus - 1);
 
 	if (seglen >= last) {
 		vm_object_reference(*objp);
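One hazard worth flagging in the vmm_dev.c hunks above: vmmdev_rw() evaluates
maxvcpus - 1 three times (lock, vm_gpa_hold(), unlock). While the tunable
remains CTLFLAG_RWTUN (see x86.c below), a concurrent sysctl write between
those evaluations could freeze one vcpu and unfreeze another. The
VM_GET_MEMSEG path already shows the safer shape, sketched here:

	/* Capture the index once; do not re-read the tunable. */
	vcpu = maxvcpus - 1;
	error = vcpu_lock_one(sc, vcpu);
	if (error)
		return (error);
	/* ... inspect the memory map or copy guest memory ... */
	vcpu_unlock_one(sc, vcpu);

This hazard disappears once maxvcpus becomes RDTUN or a guarded SYSCTL_PROC.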
Index: sys/amd64/vmm/vmm_lapic.c
===================================================================
--- sys/amd64/vmm/vmm_lapic.c
+++ sys/amd64/vmm/vmm_lapic.c
@@ -49,12 +49,14 @@
 #define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
 #define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */
 
+extern u_int maxvcpus;
+
 int
 lapic_set_intr(struct vm *vm, int cpu, int vector, bool level)
 {
 	struct vlapic *vlapic;
 
-	if (cpu < 0 || cpu >= VM_MAXCPU)
+	if (cpu < 0 || cpu >= maxvcpus)
 		return (EINVAL);
 
 	/*
@@ -77,7 +79,7 @@
 	cpuset_t dmask;
 	int error;
 
-	if (cpu < -1 || cpu >= VM_MAXCPU)
+	if (cpu < -1 || cpu >= maxvcpus)
 		return (EINVAL);
 
 	if (cpu == -1)
Index: sys/amd64/vmm/vmm_stat.c
===================================================================
--- sys/amd64/vmm/vmm_stat.c
+++ sys/amd64/vmm/vmm_stat.c
@@ -49,6 +49,8 @@
 static int vst_num_elems, vst_num_types;
 static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
 
+extern u_int maxvcpus;
+
 static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
 
 #define	vst_size	((size_t)vst_num_elems * sizeof(uint64_t))
@@ -86,7 +88,7 @@
 	uint64_t *stats;
 	int i;
 
-	if (vcpu < 0 || vcpu >= VM_MAXCPU)
+	if (vcpu < 0 || vcpu >= maxvcpus)
 		return (EINVAL);
 
 	/* Let stats functions update their counters */
Index: sys/amd64/vmm/x86.c
===================================================================
--- sys/amd64/vmm/x86.c
+++ sys/amd64/vmm/x86.c
@@ -59,6 +59,21 @@
     "Number of times an unknown cpuid leaf was accessed");
 
 /*
+ * FIXME: This should be a RDTUN, but it's a RWTUN for debugging purposes at
+ * the moment.
+ *
+ * One idea to start with is to make this a SYSCTL_PROC and add some code to
+ * limit maxvcpus to 21 until FADT_OFFSET, HPET_OFFSET, MCFG_OFFSET,
+ * FACS_OFFSET and DSDT_OFFSET are dynamically calculated.
+ *
+ * There's a discussion about this at
+ * lists.freebsd.org/pipermail/freebsd-virtualization/2016-September/004724.html
+ */
+u_int maxvcpus = VM_MAXCPU;
+SYSCTL_UINT(_hw_vmm, OID_AUTO, maxvcpus, CTLFLAG_RWTUN, &maxvcpus,
+    VM_MAXCPU, "Maximum number of virtual CPUs");
+
+/*
  * The default CPU topology is a single thread per package.
  */
 static u_int threads_per_core = 1;
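A sketch of the SYSCTL_PROC idea floated in the comment above, assuming the
interim hard limit of 21 from that same comment (the handler name and exact
bounds are illustrative, not part of this patch):

	static int
	sysctl_hw_vmm_maxvcpus(SYSCTL_HANDLER_ARGS)
	{
		u_int val;
		int error;

		val = maxvcpus;
		error = sysctl_handle_int(oidp, &val, 0, req);
		if (error != 0 || req->newptr == NULL)
			return (error);
		/*
		 * Clamp until FADT_OFFSET and friends are computed
		 * dynamically; also refuse zero and anything above the
		 * static VM_MAXCPU arrays that still exist.
		 */
		if (val == 0 || val > VM_MAXCPU || val > 21)
			return (EINVAL);
		maxvcpus = val;
		return (0);
	}
	SYSCTL_PROC(_hw_vmm, OID_AUTO, maxvcpus,
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_hw_vmm_maxvcpus, "IU", "Maximum number of virtual CPUs");

Rejecting changes while any VM exists would be the next refinement; otherwise
a running guest created with a larger maxvcpus could outlive a shrink.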