Index: head/sys/amd64/vmm/amd/svm.c
===================================================================
--- head/sys/amd64/vmm/amd/svm.c
+++ head/sys/amd64/vmm/amd/svm.c
@@ -1496,11 +1496,8 @@
 		break;
 	case VMCB_EXIT_CPUID:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
-		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
-		    (uint32_t *)&state->rax,
-		    (uint32_t *)&ctx->sctx_rbx,
-		    (uint32_t *)&ctx->sctx_rcx,
-		    (uint32_t *)&ctx->sctx_rdx);
+		handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
+		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
 		break;
 	case VMCB_EXIT_HLT:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
Index: head/sys/amd64/vmm/intel/vmx.c
===================================================================
--- head/sys/amd64/vmm/intel/vmx.c
+++ head/sys/amd64/vmm/intel/vmx.c
@@ -1193,15 +1193,11 @@
 static int
 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
 {
-	int handled, func;
+	int handled;
 
-	func = vmxctx->guest_rax;
-
-	handled = x86_emulate_cpuid(vm, vcpu,
-	    (uint32_t*)(&vmxctx->guest_rax),
-	    (uint32_t*)(&vmxctx->guest_rbx),
-	    (uint32_t*)(&vmxctx->guest_rcx),
-	    (uint32_t*)(&vmxctx->guest_rdx));
+	handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
+	    (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
+	    (uint64_t *)&vmxctx->guest_rdx);
 
 	return (handled);
 }
Index: head/sys/amd64/vmm/x86.h
===================================================================
--- head/sys/amd64/vmm/x86.h
+++ head/sys/amd64/vmm/x86.h
@@ -64,8 +64,8 @@
  */
 #define	CPUID_0000_0001_FEAT0_VMX	(1<<5)
 
-int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
-    uint32_t *ecx, uint32_t *edx);
+int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
+    uint64_t *rcx, uint64_t *rdx);
 
 enum vm_cpuid_capability {
 	VCC_NONE,
Index: head/sys/amd64/vmm/x86.c
===================================================================
--- head/sys/amd64/vmm/x86.c
+++ head/sys/amd64/vmm/x86.c
@@ -87,35 +87,40 @@
 }
 
 int
-x86_emulate_cpuid(struct vm *vm, int vcpu_id,
-    uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
+    uint64_t *rcx, uint64_t *rdx)
 {
 	const struct xsave_limits *limits;
 	uint64_t cr4;
 	int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
 	    width, x2apic_id;
-	unsigned int func, regs[4], logical_cpus;
+	unsigned int func, regs[4], logical_cpus, param;
 	enum x2apic_state x2apic_state;
 	uint16_t cores, maxcpus, sockets, threads;
 
-	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);
+	/*
+	 * The function of CPUID is controlled through the provided value of
+	 * %eax (and secondarily %ecx, for certain leaf data).
+	 */
+	func = (uint32_t)*rax;
+	param = (uint32_t)*rcx;
+	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);
+
 
 	/*
 	 * Requests for invalid CPUID levels should map to the highest
 	 * available level instead.
 	 */
-	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
-		if (*eax > cpu_exthigh)
-			*eax = cpu_exthigh;
-	} else if (*eax >= 0x40000000) {
-		if (*eax > CPUID_VM_HIGH)
-			*eax = CPUID_VM_HIGH;
-	} else if (*eax > cpu_high) {
-		*eax = cpu_high;
+	if (cpu_exthigh != 0 && func >= 0x80000000) {
+		if (func > cpu_exthigh)
+			func = cpu_exthigh;
+	} else if (func >= 0x40000000) {
+		if (func > CPUID_VM_HIGH)
+			func = CPUID_VM_HIGH;
+	} else if (func > cpu_high) {
+		func = cpu_high;
 	}
 
-	func = *eax;
-
 	/*
 	 * In general the approach used for CPU topology is to
 	 * advertise a flat topology where all CPUs are packages with
@@ -133,10 +138,10 @@
 	case CPUID_8000_0003:
 	case CPUID_8000_0004:
 	case CPUID_8000_0006:
-		cpuid_count(*eax, *ecx, regs);
+		cpuid_count(func, param, regs);
 		break;
 	case CPUID_8000_0008:
-		cpuid_count(*eax, *ecx, regs);
+		cpuid_count(func, param, regs);
 		if (vmm_is_svm()) {
 			/*
 			 * As on Intel (0000_0007:0, EDX), mask out
@@ -167,7 +172,7 @@
 		break;
 
 	case CPUID_8000_0001:
-		cpuid_count(*eax, *ecx, regs);
+		cpuid_count(func, param, regs);
 
 		/*
 		 * Hide SVM from guest.
@@ -248,7 +253,7 @@
 		 */
 		vm_get_topology(vm, &sockets, &cores, &threads,
 		    &maxcpus);
-		switch (*ecx) {
+		switch (param) {
 		case 0:
 			logical_cpus = threads;
 			level = 1;
@@ -396,7 +401,7 @@
 		break;
 
 	case CPUID_0000_0004:
-		cpuid_count(*eax, *ecx, regs);
+		cpuid_count(func, param, regs);
 
 		if (regs[0] || regs[1] || regs[2] || regs[3]) {
 			vm_get_topology(vm, &sockets, &cores, &threads,
@@ -425,8 +430,8 @@
 		regs[3] = 0;
 
 		/* leaf 0 */
-		if (*ecx == 0) {
-			cpuid_count(*eax, *ecx, regs);
+		if (param == 0) {
+			cpuid_count(func, param, regs);
 
 			/* Only leaf 0 is supported */
 			regs[0] = 0;
@@ -485,21 +490,21 @@
 		if (vmm_is_intel()) {
 			vm_get_topology(vm, &sockets, &cores, &threads,
 			    &maxcpus);
-			if (*ecx == 0) {
+			if (param == 0) {
 				logical_cpus = threads;
 				width = log2(logical_cpus);
 				level = CPUID_TYPE_SMT;
 				x2apic_id = vcpu_id;
 			}
 
-			if (*ecx == 1) {
+			if (param == 1) {
 				logical_cpus = threads * cores;
 				width = log2(logical_cpus);
 				level = CPUID_TYPE_CORE;
 				x2apic_id = vcpu_id;
 			}
 
-			if (!cpuid_leaf_b || *ecx >= 2) {
+			if (!cpuid_leaf_b || param >= 2) {
 				width = 0;
 				logical_cpus = 0;
 				level = 0;
@@ -508,7 +513,7 @@
 
 			regs[0] = width & 0x1f;
 			regs[1] = logical_cpus & 0xffff;
-			regs[2] = (level << 8) | (*ecx & 0xff);
+			regs[2] = (level << 8) | (param & 0xff);
 			regs[3] = x2apic_id;
 		} else {
 			regs[0] = 0;
@@ -528,8 +533,8 @@
 			break;
 		}
 
-		cpuid_count(*eax, *ecx, regs);
-		switch (*ecx) {
+		cpuid_count(func, param, regs);
+		switch (param) {
 		case 0:
 			/*
 			 * Only permit the guest to use bits
@@ -559,7 +564,7 @@
 			 * pass through as-is, otherwise return
 			 * all zeroes.
 			 */
-			if (!(limits->xcr0_allowed & (1ul << *ecx))) {
+			if (!(limits->xcr0_allowed & (1ul << param))) {
 				regs[0] = 0;
 				regs[1] = 0;
 				regs[2] = 0;
@@ -596,14 +601,17 @@
 		 * how many unhandled leaf values have been seen.
 		 */
 		atomic_add_long(&bhyve_xcpuids, 1);
-		cpuid_count(*eax, *ecx, regs);
+		cpuid_count(func, param, regs);
 		break;
 	}
 
-	*eax = regs[0];
-	*ebx = regs[1];
-	*ecx = regs[2];
-	*edx = regs[3];
+	/*
+	 * CPUID clears the upper 32-bits of the long-mode registers.
+	 */
+	*rax = regs[0];
+	*rbx = regs[1];
+	*rcx = regs[2];
+	*rdx = regs[3];
 
 	return (1);
 }
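
Not part of the patch: a minimal userland sketch (my own example, not from the diff) of the behaviour the new comment in x86.c documents. In long mode, CPUID produces 32-bit results and clears bits 63:32 of %rax/%rbx/%rcx/%rdx, which is why the emulation can assign the 32-bit regs[] values straight into the vcpu's 64-bit register storage; in C, storing a uint32_t value into a uint64_t object zero-extends in the same way.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t rax = 0xdeadbeefcafef00dULL;	/* stale bits in the upper half */
	uint32_t result = 0x0000000d;		/* 32-bit value a CPUID leaf might return */

	/* Assigning the 32-bit result clears bits 63:32, matching CPUID. */
	rax = result;
	printf("%#018jx\n", (uintmax_t)rax);	/* prints 0x000000000000000d */
	return (0);
}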