diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -150,6 +150,21 @@
         uint16_t vpids[VM_MAXCPU];
 };
 
+#define VMX_CTR0(vcpu, format) \
+        VCPU_CTR0((vcpu)->vmx->vm, (vcpu)->vcpuid, format)
+
+#define VMX_CTR1(vcpu, format, p1) \
+        VCPU_CTR1((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1)
+
+#define VMX_CTR2(vcpu, format, p1, p2) \
+        VCPU_CTR2((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1, p2)
+
+#define VMX_CTR3(vcpu, format, p1, p2, p3) \
+        VCPU_CTR3((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1, p2, p3)
+
+#define VMX_CTR4(vcpu, format, p1, p2, p3, p4) \
+        VCPU_CTR4((vcpu)->vmx->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
+
 #define VMX_GUEST_VMEXIT 0
 #define VMX_VMRESUME_ERROR 1
 #define VMX_VMLAUNCH_ERROR 2
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1234,8 +1234,7 @@
 vmx_run_trace(struct vmx_vcpu *vcpu)
 {
 #ifdef KTR
-        VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
-            vmcs_guest_rip());
+        VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip());
 #endif
 }
 
@@ -1244,7 +1243,7 @@
     int handled)
 {
 #ifdef KTR
-        VCPU_CTR3(vcpu->vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
+        VMX_CTR3(vcpu, "%s %s vmexit at 0x%0lx",
             handled ? "handled" : "unhandled",
             exit_reason_to_str(exit_reason), rip);
 #endif
@@ -1254,8 +1253,7 @@
 vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip)
 {
 #ifdef KTR
-        VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx",
-            rip);
+        VMX_CTR1(vcpu, "astpending vmexit at 0x%0lx", rip);
 #endif
 }
 
@@ -1353,8 +1351,7 @@
         if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
                 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
-                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
-                    "Enabling interrupt window exiting");
+                VMX_CTR0(vcpu, "Enabling interrupt window exiting");
         }
 }
 
@@ -1366,8 +1363,7 @@
             ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls));
         vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
-        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
-            "Disabling interrupt window exiting");
+        VMX_CTR0(vcpu, "Disabling interrupt window exiting");
 }
 
 static void __inline
@@ -1377,8 +1373,7 @@
         if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
                 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
-                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
-                    "Enabling NMI window exiting");
+                VMX_CTR0(vcpu, "Enabling NMI window exiting");
         }
 }
 
@@ -1390,7 +1385,7 @@
             ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls));
         vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
-        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
+        VMX_CTR0(vcpu, "Disabling NMI window exiting");
 }
 
 int
@@ -1401,7 +1396,7 @@
         if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
                 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET;
                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
-                VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling TSC offsetting");
+                VMX_CTR0(vcpu, "Enabling TSC offsetting");
         }
 
         error = vmwrite(VMCS_TSC_OFFSET, offset);
@@ -1437,7 +1432,7 @@
 
         info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
-        VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Injecting vNMI");
+        VMX_CTR0(vcpu, "Injecting vNMI");
 
         /* Clear the request */
         vm_nmi_clear(vmx->vm, vcpu->vcpuid);
@@ -1454,7 +1449,7 @@
         if (vcpu->state.nextrip != guestrip) {
                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
                 if (gi & HWINTR_BLOCKING) {
-                        VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
+                        VMX_CTR2(vcpu, "Guest interrupt blocking "
                             "cleared due to rip change: %#lx/%#lx",
                             vcpu->state.nextrip, guestrip);
                         gi &= ~HWINTR_BLOCKING;
@@ -1507,13 +1502,12 @@
                                 vmx_inject_nmi(vmx, vcpu);
                                 need_nmi_exiting = 0;
                         } else {
-                                VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot "
-                                    "inject NMI due to VM-entry intr info %#x",
-                                    info);
+                                VMX_CTR1(vcpu, "Cannot inject NMI "
+                                    "due to VM-entry intr info %#x", info);
                         }
                 } else {
-                        VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot inject NMI "
-                            "due to Guest Interruptibility-state %#x", gi);
+                        VMX_CTR1(vcpu, "Cannot inject NMI due to "
+                            "Guest Interruptibility-state %#x", gi);
                 }
 
                 if (need_nmi_exiting)
@@ -1533,8 +1527,8 @@
          * not needed for correctness.
          */
         if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
-                VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Skip interrupt injection "
-                    "due to pending int_window_exiting");
+                VMX_CTR0(vcpu, "Skip interrupt injection due to "
+                    "pending int_window_exiting");
                 return;
         }
 
@@ -1568,15 +1562,15 @@
         /* Check RFLAGS.IF and the interruptibility state of the guest */
         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
         if ((rflags & PSL_I) == 0) {
-                VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
-                    "to rflags %#lx", vector, rflags);
+                VMX_CTR2(vcpu, "Cannot inject vector %d due to "
+                    "rflags %#lx", vector, rflags);
                 goto cantinject;
         }
 
         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
         if (gi & HWINTR_BLOCKING) {
-                VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
-                    "to Guest Interruptibility-state %#x", vector, gi);
+                VMX_CTR2(vcpu, "Cannot inject vector %d due to "
+                    "Guest Interruptibility-state %#x", vector, gi);
                 goto cantinject;
         }
 
@@ -1589,8 +1583,8 @@
                  * - An exception was injected above.
                  * - An NMI was injected above or after "NMI window exiting"
                  */
-                VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
-                    "to VM-entry intr info %#x", vector, info);
+                VMX_CTR2(vcpu, "Cannot inject vector %d due to "
+                    "VM-entry intr info %#x", vector, info);
                 goto cantinject;
         }
 
@@ -1620,8 +1614,7 @@
                 vmx_set_int_window_exiting(vcpu);
         }
 
-        VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Injecting hwintr at vector %d",
-            vector);
+        VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector);
 
         return;
 
@@ -1647,7 +1640,7 @@
 {
         uint32_t gi;
 
-        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
+        VMX_CTR0(vcpu, "Restore Virtual-NMI blocking");
         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
         gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
@@ -1658,7 +1651,7 @@
 {
         uint32_t gi;
 
-        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
+        VMX_CTR0(vcpu, "Clear Virtual-NMI blocking");
         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
         gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
@@ -2395,7 +2388,7 @@
          * as most VM-exit fields are not populated as usual.
          */
         if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
-                VCPU_CTR0(vmx->vm, vcpuid, "Handling MCE during VM-entry");
+                VMX_CTR0(vcpu, "Handling MCE during VM-entry");
                 __asm __volatile("int $18");
                 return (1);
         }
@@ -2490,7 +2483,7 @@
                 }
                 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
                 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpuid, vmexit, ts);
-                VCPU_CTR4(vmx->vm, vcpuid, "task switch reason %d, tss 0x%04x, "
+                VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, "
                     "%s errcode 0x%016lx", ts->reason, ts->tsssel,
                     ts->ext ? "external" : "internal",
                     ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
@@ -2514,7 +2507,7 @@
                 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1);
                 retu = false;
                 ecx = vmxctx->guest_rcx;
-                VCPU_CTR1(vmx->vm, vcpuid, "rdmsr 0x%08x", ecx);
+                VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
                 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx);
                 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
                 if (error) {
@@ -2534,7 +2527,7 @@
                 eax = vmxctx->guest_rax;
                 ecx = vmxctx->guest_rcx;
                 edx = vmxctx->guest_rdx;
-                VCPU_CTR2(vmx->vm, vcpuid, "wrmsr 0x%08x value 0x%016lx",
+                VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx",
                     ecx, (uint64_t)edx << 32 | eax);
                 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx,
                     (uint64_t)edx << 32 | eax);
@@ -2680,7 +2673,7 @@
                  * the machine check back into the guest.
                  */
                 if (intr_vec == IDT_MC) {
-                        VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to MCE handler");
+                        VMX_CTR0(vcpu, "Vectoring to MCE handler");
                         __asm __volatile("int $18");
                         return (1);
                 }
@@ -2718,7 +2711,7 @@
                         errcode_valid = 1;
                         errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
                 }
-                VCPU_CTR2(vmx->vm, vcpuid, "Reflecting exception %d/%#x into "
+                VMX_CTR2(vcpu, "Reflecting exception %d/%#x into "
                     "the guest", intr_vec, errcode);
                 SDT_PROBE5(vmm, vmx, exit, exception, vmx, vcpuid, vmexit,
                     intr_vec, errcode);
@@ -2913,8 +2906,7 @@
         if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
                 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
                     "to NMI has invalid vector: %#x", intr_info));
-                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
-                    "Vectoring to NMI handler");
+                VMX_CTR0(vcpu, "Vectoring to NMI handler");
                 __asm __volatile("int $2");
         }
 }
@@ -3212,7 +3204,7 @@
             handled, vmexit->exitcode);
         }
 
-        VCPU_CTR1(vm, vcpuid, "returning from vmx_run: exitcode %d",
+        VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d",
             vmexit->exitcode);
 
         VMCLEAR(vmcs);
@@ -3359,8 +3351,8 @@
                 error = vmcs_setreg(vmcs, running, ident, gi);
         }
 done:
-        VCPU_CTR2(vcpu->vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s",
-            val, error ? "failed" : "succeeded");
+        VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val,
+            error ? "failed" : "succeeded");
         return (error);
 }
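Note (not part of the patch): the VMX_CTR* wrappers added to vmx.h simply recover the vm pointer and vcpuid from a struct vmx_vcpu, so KTR call sites only need to pass the vcpu pointer. A minimal sketch of the before/after call shape, assuming the field names shown in the vmx.h hunk (vmx->vm and vcpuid); the function below is hypothetical and for illustration only:

/*
 * Hypothetical call site.  Given the macro definitions added in vmx.h,
 * the two calls below record the same KTR event.
 */
static __inline void
example_trace(struct vmx_vcpu *vcpu, uint64_t rip)
{
        /* New form: vm and vcpuid are derived from the vcpu pointer. */
        VMX_CTR1(vcpu, "Resume execution at %#lx", rip);

        /* Old form, spelled out by hand at every call site. */
        VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "Resume execution at %#lx", rip);
}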