Index: sys/amd64/vmm/amd/svm.c
===================================================================
--- sys/amd64/vmm/amd/svm.c
+++ sys/amd64/vmm/amd/svm.c
@@ -325,9 +325,13 @@
 static void
 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
 {
-	int index, bit, error;
+	int bit, index;
+#ifdef INVARIANTS
+	int error;

-	error = svm_msr_index(msr, &index, &bit);
+	error =
+#endif
+	svm_msr_index(msr, &index, &bit);
 	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
 	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
 	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
@@ -598,12 +602,17 @@
 {
 	struct vmcb_segment seg;
 	struct vmcb_state *state;
+#ifdef INVARIANTS
 	int error;
+#endif

 	state = &vmcb->state;

 	if (state->efer & EFER_LMA) {
-		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+#ifdef INVARIANTS
+		error =
+#endif
+		vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
 		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
 		    error));

@@ -663,7 +672,10 @@
 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
     int in, struct vm_inout_str *vis)
 {
-	int error, s;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int s;

 	if (in) {
 		vis->seg_name = VM_REG_GUEST_ES;
@@ -672,8 +684,10 @@
 		s = (info1 >> 10) & 0x7;
 		vis->seg_name = vm_segment_name(s);
 	}
-
-	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+#ifdef INVARIANTS
+	error =
+#endif
+	vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
 	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
 }

@@ -802,7 +816,10 @@
 	struct vmcb_segment seg;
 	struct vmcb_ctrl *ctrl;
 	char *inst_bytes;
-	int error, inst_len;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int inst_len;

 	ctrl = &vmcb->ctrl;
 	paging = &vmexit->u.inst_emul.paging;
@@ -812,7 +829,10 @@
 	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
 	svm_paging_info(vmcb, paging);

-	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+#ifdef INVARIANTS
+	error =
+#endif
+	vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
 	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

 	switch(paging->cpu_mode) {
@@ -964,6 +984,7 @@
 	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
 }

+#ifdef INVARIANTS
 static __inline int
 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
 {
@@ -971,6 +992,7 @@
 	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_VINTR));
 }
+#endif

 static __inline void
 enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
@@ -1072,7 +1094,9 @@
 static void
 clear_nmi_blocking(struct svm_softc *sc, int vcpu)
 {
+#ifdef INVARIANTS
 	int error;
+#endif

 	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
 	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
@@ -1093,7 +1117,10 @@
 	 * Set 'intr_shadow' to prevent an NMI from being injected on the
 	 * immediate VMRUN.
 	 */
-	error = svm_modify_intr_shadow(sc, vcpu, 1);
+#ifdef INVARIANTS
+	error =
+#endif
+	svm_modify_intr_shadow(sc, vcpu, 1);
 	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
 }

@@ -1105,7 +1132,9 @@
 	struct vm_exit *vme;
 	struct vmcb_state *state;
 	uint64_t changed, lma, oldval;
+#ifdef INVARIANTS
 	int error;
+#endif

 	state = svm_get_vmcb_state(sc, vcpu);

@@ -1159,7 +1188,10 @@
 		goto gpf;
 	}

-	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
+#ifdef INVARIANTS
+	error =
+#endif
+	svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
 	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
 	return (0);
 gpf:
@@ -1285,7 +1317,10 @@
 	struct svm_regctx *ctx;
 	uint64_t code, info1, info2, val;
 	uint32_t eax, ecx, edx;
-	int error, errcode_valid, handled, idtvec, reflect;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int errcode_valid, handled, idtvec, reflect;
 	bool retu;

 	ctx = svm_get_guest_regctx(svm_sc, vcpu);
@@ -1359,7 +1394,10 @@
 		__asm __volatile("int $18");
 		break;
 	case IDT_PF:
-		error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
+#ifdef INVARIANTS
+		error =
+#endif
+		svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
 		    info2);
 		KASSERT(error == 0, ("%s: error %d updating cr2",
 		    __func__, error));
@@ -1407,7 +1445,10 @@
 		/* Reflect the exception back into the guest */
 		VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
 		    "%d/%#x into the guest", idtvec, (int)info1);
-		error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
+#ifdef INVARIANTS
+		error =
+#endif
+		vm_inject_exception(svm_sc->vm, vcpu, idtvec,
 		    errcode_valid, info1, 0);
 		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
 		    __func__, error));
Index: sys/amd64/vmm/intel/vmcs.h
===================================================================
--- sys/amd64/vmm/intel/vmcs.h
+++ sys/amd64/vmm/intel/vmcs.h
@@ -61,10 +61,13 @@
 static __inline uint64_t
 vmcs_read(uint32_t encoding)
 {
-	int error;
 	uint64_t val;
+#ifdef INVARIANTS
+	int error;

-	error = vmread(encoding, &val);
+	error =
+#endif
+	vmread(encoding, &val);
 	KASSERT(error == 0, ("vmcs_read(%u) error %d", encoding, error));
 	return (val);
 }
@@ -72,9 +75,12 @@
 static __inline void
 vmcs_write(uint32_t encoding, uint64_t val)
 {
+#ifdef INVARIANTS
 	int error;

-	error = vmwrite(encoding, val);
+	error =
+#endif
+	vmwrite(encoding, val);
 	KASSERT(error == 0, ("vmcs_write(%u) error %d", encoding, error));
 }
 #endif	/* _VMX_CPUFUNC_H_ */
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -961,9 +961,7 @@
 static int
 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
 {
-	int handled, func;
-
-	func = vmxctx->guest_rax;
+	int handled;

 	handled = x86_emulate_cpuid(vm, vcpu,
 	    (uint32_t*)(&vmxctx->guest_rax),
@@ -1155,9 +1153,15 @@
 static void
 vmx_inject_nmi(struct vmx *vmx, int vcpu)
 {
-	uint32_t gi, info;
+#ifdef INVARIANTS
+	uint32_t gi;
+#endif
+	uint32_t info;

-	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+#ifdef INVARIANTS
+	gi =
+#endif
+	vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
 	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
 	    "interruptibility-state %#x", gi));

@@ -1400,9 +1404,12 @@
 static void
 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
 {
+#ifdef INVARIANTS
 	uint32_t gi;

-	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+	gi =
+#endif
+	vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
 	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
 	    ("NMI blocking is not in effect %#x", gi));
 }
@@ -1720,11 +1727,16 @@
 inout_str_index(struct vmx *vmx, int vcpuid, int in)
 {
 	uint64_t val;
+#ifdef INVARIANTS
 	int error;
+#endif
 	enum vm_reg_name reg;

 	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
-	error = vmx_getreg(vmx, vcpuid, reg, &val);
+#ifdef INVARIANTS
+	error =
+#endif
+	vmx_getreg(vmx, vcpuid, reg, &val);
 	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
 	return (val);
 }
@@ -1733,10 +1745,15 @@
 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
 {
 	uint64_t val;
+#ifdef INVARIANTS
 	int error;
+#endif

 	if (rep) {
-		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
+#ifdef INVARIANTS
+		error =
+#endif
+		vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
 		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
 	} else {
 		val = 1;
@@ -1766,7 +1783,10 @@
 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
     struct vm_inout_str *vis)
 {
-	int error, s;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int s;

 	if (in) {
 		vis->seg_name = VM_REG_GUEST_ES;
@@ -1775,7 +1795,10 @@
 		vis->seg_name = vm_segment_name(s);
 	}

-	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
+#ifdef INVARIANTS
+	error =
+#endif
+	vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
 	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
 }

@@ -3240,7 +3263,10 @@
 	struct vmx *vmx;
 	struct vmcs *vmcs;
 	uint32_t proc_ctls2;
-	int vcpuid, error;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int vcpuid;

 	vcpuid = vlapic->vcpuid;
 	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
@@ -3263,7 +3289,10 @@
 	 * The nested page table mappings are shared by all vcpus
 	 * so unmap the APIC access page just once.
 	 */
-	error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
+#ifdef INVARIANTS
+	error =
+#endif
+	vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
 	KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
 	    __func__, error));

@@ -3271,7 +3300,10 @@
 	 * The MSR bitmap is shared by all vcpus so modify it only
 	 * once in the context of vcpu 0.
 	 */
-	error = vmx_allow_x2apic_msrs(vmx);
+#ifdef INVARIANTS
+	error =
+#endif
+	vmx_allow_x2apic_msrs(vmx);
 	KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
 	    __func__, error));
 }
Index: sys/amd64/vmm/io/vlapic.c
===================================================================
--- sys/amd64/vmm/io/vlapic.c
+++ sys/amd64/vmm/io/vlapic.c
@@ -192,11 +192,14 @@
 vlapic_get_ccr(struct vlapic *vlapic)
 {
 	struct bintime bt_now, bt_rem;
-	struct LAPIC *lapic;
 	uint32_t ccr;
-
+	ccr = 0;
+#if defined(INVARIANTS) || defined(KTR)
+	struct LAPIC *lapic;
+	lapic = vlapic->apic_page;
+#endif
 	VLAPIC_TIMER_LOCK(vlapic);
 	if (callout_active(&vlapic->callout)) {
Index: sys/amd64/vmm/io/vrtc.c
===================================================================
--- sys/amd64/vmm/io/vrtc.c
+++ sys/amd64/vmm/io/vrtc.c
@@ -279,12 +279,16 @@
 	struct clocktime ct;
 	struct timespec ts;
 	struct rtcdev *rtc;
+#ifdef KTR
 	struct vm *vm;
+#endif
 	int century, error, hour, pm, year;

 	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));

+#ifdef KTR
 	vm = vrtc->vm;
+#endif
 	rtc = &vrtc->rtcdev;
 	bzero(&ct, sizeof(struct clocktime));

@@ -387,7 +391,7 @@
 	 * Stop updating the RTC if the date/time fields programmed by
 	 * the guest are invalid.
 	 */
-	VM_CTR0(vrtc->vm, "Invalid RTC date/time programming detected");
+	VM_CTR0(vm, "Invalid RTC date/time programming detected");
 	return (VRTC_BROKEN_TIME);
 }

@@ -395,7 +399,9 @@
 vrtc_time_update(struct vrtc *vrtc, time_t newtime, sbintime_t newbase)
 {
 	struct rtcdev *rtc;
+#ifdef KTR
 	sbintime_t oldbase;
+#endif
 	time_t oldtime;
 	uint8_t alarm_sec, alarm_min, alarm_hour;

@@ -410,7 +416,9 @@
 	VM_CTR2(vrtc->vm, "Updating RTC secs from %#lx to %#lx",
 	    oldtime, newtime);

+#ifdef KTR
 	oldbase = vrtc->base_uptime;
+#endif
 	VM_CTR2(vrtc->vm, "Updating RTC base uptime from %#lx to %#lx",
 	    oldbase, newbase);
 	vrtc->base_uptime = newbase;
@@ -539,7 +547,9 @@
 	struct vrtc *vrtc = arg;
 	sbintime_t freqsbt, basetime;
 	time_t rtctime;
+#ifdef INVARIANTS
 	int error;
+#endif

 	VM_CTR0(vrtc->vm, "vrtc callout fired");

@@ -560,7 +570,10 @@
 	if (aintr_enabled(vrtc) || uintr_enabled(vrtc)) {
 		rtctime = vrtc_curtime(vrtc, &basetime);
-		error = vrtc_time_update(vrtc, rtctime, basetime);
+#ifdef INVARIANTS
+		error =
+#endif
+		vrtc_time_update(vrtc, rtctime, basetime);
 		KASSERT(error == 0, ("%s: vrtc_time_update error %d",
 		    __func__, error));
 	}

@@ -572,6 +585,7 @@
 	VRTC_UNLOCK(vrtc);
 }

+#ifdef INVARIANTS
 static __inline void
 vrtc_callout_check(struct vrtc *vrtc, sbintime_t freq)
 {
@@ -582,6 +596,7 @@
 	    ("vrtc callout %s with frequency %#lx",
 	    active ? "active" : "inactive", freq));
 }
+#endif

 static void
 vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval)
@@ -627,7 +642,9 @@
 	struct rtcdev *rtc;
 	sbintime_t oldfreq, newfreq, basetime;
 	time_t curtime, rtctime;
+#ifdef INVARIANTS
 	int error;
+#endif
 	uint8_t oldval, changed;

 	KASSERT(VRTC_LOCKED(vrtc), ("%s: vrtc not locked", __func__));
@@ -671,7 +688,10 @@
 			rtctime = VRTC_BROKEN_TIME;
 			rtc->reg_b &= ~RTCSB_UINTR;
 		}
-		error = vrtc_time_update(vrtc, rtctime, basetime);
+#ifdef INVARIANTS
+		error =
+#endif
+		vrtc_time_update(vrtc, rtctime, basetime);
 		KASSERT(error == 0, ("vrtc_time_update error %d", error));
 	}

@@ -687,8 +707,10 @@
 	newfreq = vrtc_freq(vrtc);
 	if (newfreq != oldfreq)
 		vrtc_callout_reset(vrtc, newfreq);
+#ifdef INVARIANTS
 	else
 		vrtc_callout_check(vrtc, newfreq);
+#endif

 	/*
 	 * The side effect of bits that control the RTC date/time format
@@ -739,8 +761,10 @@
 	newfreq = vrtc_freq(vrtc);
 	if (newfreq != oldfreq)
 		vrtc_callout_reset(vrtc, newfreq);
+#ifdef INVARIANTS
 	else
 		vrtc_callout_check(vrtc, newfreq);
+#endif
 }

 int
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -759,11 +759,15 @@
 vm_free_memmap(struct vm *vm, int ident)
 {
 	struct mem_map *mm;
+#ifdef INVARIANTS
 	int error;
-
+#endif
 	mm = &vm->mem_maps[ident];
 	if (mm->len) {
-		error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
+#ifdef INVARIANTS
+		error =
+#endif
+		vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
 		    mm->gpa + mm->len);
 		KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
 		    __func__, error));
@@ -1453,10 +1457,9 @@
 static int
 vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
 {
-	int i, done;
 	struct vcpu *vcpu;
+	int i;

-	done = 0;
 	vcpu = &vm->vcpu[vcpuid];

 	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
@@ -1706,7 +1709,9 @@
 	struct vcpu *vcpu;
 	enum vcpu_state state;
 	uint64_t rip;
+#ifdef INVARIANTS
 	int error;
+#endif

 	vm = arg;
 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
@@ -1731,7 +1736,10 @@
 	 * Thus instruction restart is achieved by setting 'nextrip'
 	 * to the vcpu's %rip.
 	 */
-	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
+#ifdef INVARIANTS
+	error =
+#endif
+	vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
 	KASSERT(!error, ("%s: error %d getting rip", __func__, error));
 	VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
 	    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
@@ -1944,7 +1952,9 @@
 {
 	struct vcpu *vcpu;
 	uint64_t regval;
+#ifdef INVARIANTS
 	int error;
+#endif

 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
 		return (EINVAL);
@@ -1972,7 +1982,10 @@
 		/*
 		 * Exceptions don't deliver an error code in real mode.
 		 */
-		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
+#ifdef INVARIANTS
+		error =
+#endif
+		vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
 		KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
 		if (!(regval & CR0_PE))
 			errcode_valid = 0;
@@ -1984,7 +1997,10 @@
 	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
 	 * one instruction or incurs an exception.
 	 */
-	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+#ifdef INVARIANTS
+	error =
+#endif
+	vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
 	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
 	    __func__, error));

@@ -2004,12 +2020,18 @@
     int errcode)
 {
 	struct vm *vm;
-	int error, restart_instruction;
+#ifdef INVARIANTS
+	int error;
+#endif
+	int restart_instruction;

 	vm = vmarg;
 	restart_instruction = 1;

-	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
+#ifdef INVARIANTS
+	error =
+#endif
+	vm_inject_exception(vm, vcpuid, vector, errcode_valid,
 	    errcode, restart_instruction);
 	KASSERT(error == 0, ("vm_inject_exception error %d", error));
 }
@@ -2018,13 +2040,18 @@
 vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
 {
 	struct vm *vm;
+#ifdef INVARIANTS
 	int error;
+#endif

 	vm = vmarg;
 	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
 	    error_code, cr2);

-	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
+#ifdef INVARIANTS
+	error =
+#endif
+	vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
 	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

 	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
Index: sys/amd64/vmm/vmm_dev.c
===================================================================
--- sys/amd64/vmm/vmm_dev.c
+++ sys/amd64/vmm/vmm_dev.c
@@ -247,7 +247,7 @@
 static int
 alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
 {
-	char *name;
+	char *name, *namep;
 	int error;
 	bool sysmem;

@@ -255,10 +255,11 @@
 	name = NULL;
 	sysmem = true;

-	if (VM_MEMSEG_NAME(mseg)) {
+	namep = VM_MEMSEG_NAME(mseg);
+	if (namep != NULL) {
 		sysmem = false;
 		name = malloc(SPECNAMELEN + 1, M_VMMDEV, M_WAITOK);
-		error = copystr(VM_MEMSEG_NAME(mseg), name, SPECNAMELEN + 1, 0);
+		error = copystr(namep, name, SPECNAMELEN + 1, 0);
 		if (error)
 			goto done;
 	}
@@ -267,7 +268,7 @@
 	if (error)
 		goto done;

-	if (VM_MEMSEG_NAME(mseg)) {
+	if (namep != NULL) {
 		error = devmem_create_cdev(vm_name(sc->vm), mseg->segid, name);
 		if (error)
 			vm_free_memseg(sc->vm, mseg->segid);
@@ -719,9 +720,12 @@
 {
 	struct vmmdev_softc *sc = arg;
 	struct devmem_softc *dsc;
+#ifdef INVARIANTS
 	int error;

-	error = vcpu_lock_all(sc);
+	error =
+#endif
+	vcpu_lock_all(sc);
 	KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));

 	while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
Index: sys/amd64/vmm/vmm_instruction_emul.c
===================================================================
--- sys/amd64/vmm/vmm_instruction_emul.c
+++ sys/amd64/vmm/vmm_instruction_emul.c
@@ -613,17 +613,25 @@
 	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
 	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
+	if (error != 0)
+		goto guest_fault;

 	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
 	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
+	if (error != 0)
+		goto guest_fault;

 	error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
 	KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
 	    __func__, error, seg));
+	if (error != 0)
+		goto guest_fault;

 	error = vie_read_register(vm, vcpuid, gpr, &val);
 	KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
 	    error, gpr));
+	if (error != 0)
+		goto guest_fault;

 	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
 	    addrsize, prot, gla)) {
@@ -648,11 +656,11 @@
 	}

 	*fault = 0;
-	return (0);
+	return (error);

 guest_fault:
 	*fault = 1;
-	return (0);
+	return (error);
 }

@@ -1701,6 +1709,11 @@
 	usermode = (paging->cpl == 3 ? 1 : 0);
 	writable = prot & VM_PROT_WRITE;
 	cookie = NULL;
+	pgsize = 1;
+	pte = 0;
+	ptpbase = NULL;
+	ptpindex = -1;
+	ptpshift = 0;
 	retval = 0;
 	retries = 0;
 restart:
Index: sys/amd64/vmm/vmm_ioport.c
===================================================================
--- sys/amd64/vmm/vmm_ioport.c
+++ sys/amd64/vmm/vmm_ioport.c
@@ -155,11 +155,14 @@
 int
 vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
 {
-	int bytes, error;
+	int error;
+#ifdef INVARIANTS
+	int bytes;

 	bytes = vmexit->u.inout.bytes;
 	KASSERT(bytes == 1 || bytes == 2 || bytes == 4,
 	    ("vm_handle_inout: invalid operand size %d", bytes));
+#endif

 	if (vmexit->u.inout.string)
 		error = emulate_inout_str(vm, vcpuid, vmexit, retu);
Index: sys/amd64/vmm/x86.c
===================================================================
--- sys/amd64/vmm/x86.c
+++ sys/amd64/vmm/x86.c
@@ -90,10 +90,13 @@
 {
 	const struct xsave_limits *limits;
 	uint64_t cr4;
-	int error, enable_invpcid, level, width, x2apic_id;
+	int error, enable_invpcid, level, x2apic_id;
 	unsigned int func, regs[4], logical_cpus;
 	enum x2apic_state x2apic_state;

+	logical_cpus = 0;
+	x2apic_id = 0;
+
 	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

 	/*
@@ -381,6 +384,11 @@
 			break;

 		case CPUID_0000_000B:
+		{
+			int width;
+
+			width = 0;
+
 			/*
 			 * Processor topology enumeration
 			 */
@@ -410,6 +418,7 @@
 			regs[1] = logical_cpus & 0xffff;
 			regs[2] = (level << 8) | (*ecx & 0xff);
 			regs[3] = x2apic_id;
+		}
 			break;

 		case CPUID_0000_000D:
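Note on the pattern, for review: almost every hunk above applies one of two related idioms. Where a return value is consumed only by KASSERT(), the receiving variable and its assignment are compiled in only under INVARIANTS, with the split "error =" / "#endif" arrangement keeping a single copy of the call; where a value feeds only the VM_CTR*()/VCPU_CTR*() trace macros, the same is done under KTR. Either way, kernels built without those options no longer carry set-but-unused variables, while the call itself is still made for its side effects. Below is a minimal sketch of the INVARIANTS idiom; do_op() and read_op() are hypothetical stand-ins for calls such as vmread() or vmx_getreg(), not names from this patch.

	#include <sys/param.h>
	#include <sys/systm.h>		/* KASSERT() */

	/* Hypothetical stand-in for a call checked only by KASSERT(). */
	static int
	do_op(uint64_t *valp)
	{

		*valp = 42;
		return (0);		/* 0 on success, errno otherwise */
	}

	static uint64_t
	read_op(void)
	{
		uint64_t val;
	#ifdef INVARIANTS
		int error;

		/*
		 * With INVARIANTS, 'error =' and the call below join into
		 * a single statement that KASSERT() then consumes.
		 */
		error =
	#endif
		do_op(&val);
		/* Expands to nothing (and drops 'error') if !INVARIANTS. */
		KASSERT(error == 0, ("%s: do_op error %d", __func__, error));
		return (val);
	}

With INVARIANTS the function reads exactly as before the patch; without it, the preprocessor leaves only the bare call do_op(&val), so nothing is left for -Wunused-but-set-variable to flag. Splitting the assignment across the #ifdef, rather than duplicating the call in both branches, is what keeps the call site single-copy.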