D37152: vmm: Remove the per-vm cookie argument from vmmops taking a vcpu.
D37152.id112261.diff (64 KB)
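
Summary of the change: every vmm_ops callback that operates on a vcpu loses its leading per-VM "void *vmi" cookie and keeps only the per-vcpu cookie. To make that possible, each backend's vcpu structure gains a back-pointer to its per-VM softc: struct svm_vcpu gains an sc field (set in svm_vcpu_init()) and struct vmx_vcpu gains a vmx field (set in vmx_vcpu_init()), so a callback can recover the per-VM state from the vcpu cookie alone. Below is a minimal sketch of the resulting pattern for a hypothetical third backend; the names example_softc, example_vcpu, example_vcpu_init, example_getreg and the malloc type M_EXAMPLE are illustrative assumptions, not part of this diff.

	/* Hypothetical backend showing the post-change callback shape. */
	struct example_softc {
		struct vm *vm;			/* per-VM state */
	};

	struct example_vcpu {
		struct example_softc *sc;	/* back-pointer, set at vcpu init */
		int vcpuid;
	};

	static void *
	example_vcpu_init(void *vmi, int vcpuid)
	{
		struct example_softc *sc = vmi;	/* vcpu_init still receives vmi */
		struct example_vcpu *vcpu;

		vcpu = malloc(sizeof(*vcpu), M_EXAMPLE, M_WAITOK | M_ZERO);
		vcpu->sc = sc;			/* the back-pointer this diff introduces */
		vcpu->vcpuid = vcpuid;
		return (vcpu);
	}

	static int
	example_getreg(void *vcpui, int num, uint64_t *retval)
	{
		struct example_vcpu *vcpu = vcpui;

		/* Per-VM state is reachable without a separate vmi argument. */
		/* ... fetch register 'num' via vcpu and vcpu->sc->vm ... */
		*retval = 0;
		return (0);
	}

On the caller side the change is mechanical; for example, in vmm.c, vmmops_getreg(vm->cookie, vcpu_cookie(vm, vcpu), reg, retval) becomes vmmops_getreg(vcpu_cookie(vm, vcpu), reg, retval).
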
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -167,29 +167,24 @@
typedef int (*vmm_cleanup_func_t)(void);
typedef void (*vmm_resume_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vmi, void *vcpui, register_t rip,
+typedef int (*vmi_run_func_t)(void *vcpui, register_t rip,
struct pmap *pmap, struct vm_eventinfo *info);
typedef void (*vmi_cleanup_func_t)(void *vmi);
typedef void * (*vmi_vcpu_init_func_t)(void *vmi, int vcpu_id);
-typedef void (*vmi_vcpu_cleanup_func_t)(void *vmi, void *vcpui);
-typedef int (*vmi_get_register_t)(void *vmi, void *vcpui, int num,
- uint64_t *retval);
-typedef int (*vmi_set_register_t)(void *vmi, void *vcpui, int num,
- uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vmi, void *vcpui, int num,
- struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vmi, void *vcpui, int num,
- struct seg_desc *desc);
-typedef int (*vmi_get_cap_t)(void *vmi, void *vcpui, int num, int *retval);
-typedef int (*vmi_set_cap_t)(void *vmi, void *vcpui, int num, int val);
+typedef void (*vmi_vcpu_cleanup_func_t)(void *vcpui);
+typedef int (*vmi_get_register_t)(void *vcpui, int num, uint64_t *retval);
+typedef int (*vmi_set_register_t)(void *vcpui, int num, uint64_t val);
+typedef int (*vmi_get_desc_t)(void *vcpui, int num, struct seg_desc *desc);
+typedef int (*vmi_set_desc_t)(void *vcpui, int num, struct seg_desc *desc);
+typedef int (*vmi_get_cap_t)(void *vcpui, int num, int *retval);
+typedef int (*vmi_set_cap_t)(void *vcpui, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, void *vcpui);
-typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
+typedef struct vlapic * (*vmi_vlapic_init)(void *vcpui);
+typedef void (*vmi_vlapic_cleanup)(struct vlapic *vlapic);
typedef int (*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
-typedef int (*vmi_snapshot_vcpu_t)(void *vmi, struct vm_snapshot_meta *meta,
- void *vcpui);
-typedef int (*vmi_restore_tsc_t)(void *vmi, void *vcpui, uint64_t now);
+typedef int (*vmi_snapshot_vcpu_t)(void *vcpui, struct vm_snapshot_meta *meta);
+typedef int (*vmi_restore_tsc_t)(void *vcpui, uint64_t now);
struct vmm_ops {
vmm_init_func_t modinit; /* module wide initialization */
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -132,8 +132,8 @@
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
-static int svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
-static int svm_setreg(void *arg, void *vcpui, int ident, uint64_t val);
+static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc);
+static int svm_setreg(void *vcpui, int ident, uint64_t val);
static __inline int
flush_by_asid(void)
@@ -292,7 +292,7 @@
ctrl->tsc_offset = offset;
svm_set_dirty(vcpu, VMCB_CACHE_I);
- VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset);
+ VCPU_CTR1(sc->vm, vcpu->vcpuid, "tsc offset changed to %#lx", offset);
error = vm_set_tsc_offset(sc->vm, vcpu->vcpuid, offset);
@@ -382,8 +382,7 @@
}
static __inline int
-svm_get_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
- uint32_t bitmask)
+svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
{
struct vmcb_ctrl *ctrl;
@@ -394,8 +393,7 @@
}
static __inline void
-svm_set_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
- uint32_t bitmask, int enabled)
+svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
{
struct vmcb_ctrl *ctrl;
uint32_t oldval;
@@ -412,25 +410,23 @@
if (ctrl->intercept[idx] != oldval) {
svm_set_dirty(vcpu, VMCB_CACHE_I);
- VCPU_CTR3(sc->vm, vcpu->vcpuid, "intercept[%d] modified "
+ VCPU_CTR3(vcpu->sc->vm, vcpu->vcpuid, "intercept[%d] modified "
"from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
}
}
static __inline void
-svm_disable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
- uint32_t bitmask)
+svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
{
- svm_set_intercept(sc, vcpu, off, bitmask, 0);
+ svm_set_intercept(vcpu, off, bitmask, 0);
}
static __inline void
-svm_enable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
- uint32_t bitmask)
+svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
{
- svm_set_intercept(sc, vcpu, off, bitmask, 1);
+ svm_set_intercept(vcpu, off, bitmask, 1);
}
static void
@@ -459,9 +455,9 @@
for (n = 0; n < 16; n++) {
mask = (BIT(n) << 16) | BIT(n);
if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
- svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+ svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
else
- svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+ svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
}
/*
@@ -476,41 +472,40 @@
if (n == 2 || n == 9) {
continue;
}
- svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
+ svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
}
} else {
- svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
+ svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
}
/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
- VMCB_INTCPT_FERR_FREEZE);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
/*
* Intercept SVM instructions since AMD enables them in guests otherwise.
* Non-intercepted VMMCALL causes #UD, skip it.
*/
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
if (vcpu_trap_wbinvd(sc->vm, vcpu->vcpuid)) {
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
VMCB_INTCPT_WBINVD);
}
@@ -518,7 +513,7 @@
* From section "Canonicalization and Consistency Checks" in APMv2
* the VMRUN intercept bit must be set to pass the consistency check.
*/
- svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
+ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
/*
* The ASID will be set to a non-zero value just before VMRUN.
@@ -614,12 +609,13 @@
}
static void *
-svm_vcpu_init(void *arg, int vcpuid)
+svm_vcpu_init(void *vmi, int vcpuid)
{
- struct svm_softc *sc = arg;
+ struct svm_softc *sc = vmi;
struct svm_vcpu *vcpu;
vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
+ vcpu->sc = sc;
vcpu->vcpuid = vcpuid;
vcpu->vmcb = malloc(sizeof(struct vmcb), M_SVM, M_WAITOK | M_ZERO);
vcpu->nextrip = ~0;
@@ -723,8 +719,8 @@
}
static void
-svm_inout_str_seginfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
- int64_t info1, int in, struct vm_inout_str *vis)
+svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
+ struct vm_inout_str *vis)
{
int error __diagused, s;
@@ -736,7 +732,7 @@
vis->seg_name = vm_segment_name(s);
}
- error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+ error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}
@@ -777,8 +773,7 @@
* Handle guest I/O intercept.
*/
static int
-svm_handle_io(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
- struct vm_exit *vmexit)
+svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
{
struct vmcb_ctrl *ctrl;
struct vmcb_state *state;
@@ -821,8 +816,7 @@
vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
vis->addrsize = svm_inout_str_addrsize(info1);
- svm_inout_str_seginfo(svm_sc, vcpu, info1,
- vmexit->u.inout.in, vis);
+ svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
}
return (UNHANDLED);
@@ -936,8 +930,8 @@
* Inject an event to vcpu as described in section 15.20, "Event injection".
*/
static void
-svm_eventinject(struct svm_softc *sc, struct svm_vcpu *vcpu, int intr_type,
- int vector, uint32_t error, bool ec_valid)
+svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
+ uint32_t error, bool ec_valid)
{
struct vmcb_ctrl *ctrl;
@@ -966,23 +960,24 @@
if (ec_valid) {
ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
ctrl->eventinj |= (uint64_t)error << 32;
- VCPU_CTR3(sc->vm, vcpu->vcpuid,
+ VCPU_CTR3(vcpu->sc->vm, vcpu->vcpuid,
"Injecting %s at vector %d errcode %#x",
intrtype_to_str(intr_type), vector, error);
} else {
- VCPU_CTR2(sc->vm, vcpu->vcpuid, "Injecting %s at vector %d",
+ VCPU_CTR2(vcpu->sc->vm, vcpu->vcpuid,
+ "Injecting %s at vector %d",
intrtype_to_str(intr_type), vector);
}
}
static void
-svm_update_virqinfo(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_update_virqinfo(struct svm_vcpu *vcpu)
{
struct vm *vm;
struct vlapic *vlapic;
struct vmcb_ctrl *ctrl;
- vm = sc->vm;
+ vm = vcpu->sc->vm;
vlapic = vm_lapic(vm, vcpu->vcpuid);
ctrl = svm_get_vmcb_ctrl(vcpu);
@@ -1021,16 +1016,15 @@
#ifdef INVARIANTS
static __inline int
-vintr_intercept_enabled(struct svm_softc *sc, struct svm_vcpu *vcpu)
+vintr_intercept_enabled(struct svm_vcpu *vcpu)
{
- return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
- VMCB_INTCPT_VINTR));
+ return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
}
#endif
static __inline void
-enable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
+enable_intr_window_exiting(struct svm_vcpu *vcpu)
{
struct vmcb_ctrl *ctrl;
@@ -1038,42 +1032,41 @@
if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
- KASSERT(vintr_intercept_enabled(sc, vcpu),
+ KASSERT(vintr_intercept_enabled(vcpu),
("%s: vintr intercept should be enabled", __func__));
return;
}
- VCPU_CTR0(sc->vm, vcpu->vcpuid, "Enable intr window exiting");
+ VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "Enable intr window exiting");
ctrl->v_irq = 1;
ctrl->v_ign_tpr = 1;
ctrl->v_intr_vector = 0;
svm_set_dirty(vcpu, VMCB_CACHE_TPR);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static __inline void
-disable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
+disable_intr_window_exiting(struct svm_vcpu *vcpu)
{
struct vmcb_ctrl *ctrl;
ctrl = svm_get_vmcb_ctrl(vcpu);
if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
- KASSERT(!vintr_intercept_enabled(sc, vcpu),
+ KASSERT(!vintr_intercept_enabled(vcpu),
("%s: vintr intercept should be disabled", __func__));
return;
}
- VCPU_CTR0(sc->vm, vcpu->vcpuid, "Disable intr window exiting");
+ VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "Disable intr window exiting");
ctrl->v_irq = 0;
ctrl->v_intr_vector = 0;
svm_set_dirty(vcpu, VMCB_CACHE_TPR);
- svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+ svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static int
-svm_modify_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu,
- uint64_t val)
+svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
{
struct vmcb_ctrl *ctrl;
int oldval, newval;
@@ -1083,13 +1076,14 @@
newval = val ? 1 : 0;
if (newval != oldval) {
ctrl->intr_shadow = newval;
- VCPU_CTR1(sc->vm, vcpu->vcpuid, "Setting intr_shadow to %d", newval);
+ VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+ "Setting intr_shadow to %d", newval);
}
return (0);
}
static int
-svm_get_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t *val)
+svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
{
struct vmcb_ctrl *ctrl;
@@ -1104,31 +1098,30 @@
* to track when the vcpu is done handling the NMI.
*/
static int
-nmi_blocked(struct svm_softc *sc, struct svm_vcpu *vcpu)
+nmi_blocked(struct svm_vcpu *vcpu)
{
int blocked;
- blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
- VMCB_INTCPT_IRET);
+ blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
return (blocked);
}
static void
-enable_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
+enable_nmi_blocking(struct svm_vcpu *vcpu)
{
- KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
- VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking enabled");
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+ KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
+ VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "vNMI blocking enabled");
+ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}
static void
-clear_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
+clear_nmi_blocking(struct svm_vcpu *vcpu)
{
int error __diagused;
- KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
- VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking cleared");
+ KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
+ VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "vNMI blocking cleared");
/*
* When the IRET intercept is cleared the vcpu will attempt to execute
* the "iret" when it runs next. However, it is possible to inject
@@ -1140,13 +1133,13 @@
*
* XXX this needs to be fixed
*/
- svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+ svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
/*
* Set 'intr_shadow' to prevent an NMI from being injected on the
* immediate VMRUN.
*/
- error = svm_modify_intr_shadow(sc, vcpu, 1);
+ error = svm_modify_intr_shadow(vcpu, 1);
KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}
@@ -1214,7 +1207,7 @@
goto gpf;
}
- error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
+ error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
return (0);
gpf:
@@ -1382,7 +1375,7 @@
("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
vmexit->inst_length, code, info1, info2));
- svm_update_virqinfo(svm_sc, vcpu);
+ svm_update_virqinfo(vcpu);
svm_save_intinfo(svm_sc, vcpu);
switch (code) {
@@ -1391,7 +1384,7 @@
* Restart execution at "iret" but with the intercept cleared.
*/
vmexit->inst_length = 0;
- clear_nmi_blocking(svm_sc, vcpu);
+ clear_nmi_blocking(vcpu);
handled = 1;
break;
case VMCB_EXIT_VINTR: /* interrupt window exiting */
@@ -1420,8 +1413,7 @@
__asm __volatile("int $18");
break;
case IDT_PF:
- error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
- info2);
+ error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
KASSERT(error == 0, ("%s: error %d updating cr2",
__func__, error));
/* fallthru */
@@ -1511,7 +1503,7 @@
}
break;
case VMCB_EXIT_IO:
- handled = svm_handle_io(svm_sc, vcpu, vmexit);
+ handled = svm_handle_io(vcpu, vmexit);
vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INOUT, 1);
break;
case VMCB_EXIT_CPUID:
@@ -1616,7 +1608,7 @@
KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
"valid: %#lx", __func__, intinfo));
- svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
+ svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
VMCB_EXITINTINFO_VECTOR(intinfo),
VMCB_EXITINTINFO_EC(intinfo),
VMCB_EXITINTINFO_EC_VALID(intinfo));
@@ -1663,7 +1655,7 @@
/* NMI event has priority over interrupts. */
if (vm_nmi_pending(sc->vm, vcpuid)) {
- if (nmi_blocked(sc, vcpu)) {
+ if (nmi_blocked(vcpu)) {
/*
* Can't inject another NMI if the guest has not
* yet executed an "iret" after the last NMI.
@@ -1701,11 +1693,11 @@
vm_nmi_clear(sc->vm, vcpuid);
/* Inject NMI, vector number is not used */
- svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
+ svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
IDT_NMI, 0, false);
/* virtual NMI blocking is now in effect */
- enable_nmi_blocking(sc, vcpu);
+ enable_nmi_blocking(vcpu);
VCPU_CTR0(sc->vm, vcpuid, "Injecting vNMI");
}
@@ -1749,7 +1741,7 @@
goto done;
}
- svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
+ svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
if (!extint_pending) {
vlapic_intr_accepted(vlapic, vector);
@@ -1801,9 +1793,9 @@
("Bogus intr_window_exiting: eventinj (%#lx), "
"intr_shadow (%u), rflags (%#lx)",
ctrl->eventinj, ctrl->intr_shadow, state->rflags));
- enable_intr_window_exiting(sc, vcpu);
+ enable_intr_window_exiting(vcpu);
} else {
- disable_intr_window_exiting(sc, vcpu);
+ disable_intr_window_exiting(vcpu);
}
}
@@ -1825,7 +1817,7 @@
}
static void
-svm_pmap_activate(struct svm_softc *sc, struct svm_vcpu *vcpu, pmap_t pmap)
+svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap)
{
struct vmcb_ctrl *ctrl;
long eptgen;
@@ -2006,8 +1998,7 @@
* Start vcpu with specified RIP.
*/
static int
-svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
- struct vm_eventinfo *evinfo)
+svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
struct svm_regctx *gctx;
struct svm_softc *svm_sc;
@@ -2021,11 +2012,10 @@
int handled, vcpuid;
uint16_t ldt_sel;
- svm_sc = arg;
+ vcpu = vcpui;
+ vcpuid = vcpu->vcpuid;
+ svm_sc = vcpu->sc;
vm = svm_sc->vm;
-
- vcpu = vcpui;
- vcpuid = vcpu->vcpuid;
state = svm_get_vmcb_state(vcpu);
ctrl = svm_get_vmcb_ctrl(vcpu);
vmexit = vm_exitinfo(vm, vcpuid);
@@ -2120,7 +2110,7 @@
* Check the pmap generation and the ASID generation to
* ensure that the vcpu does not use stale TLB mappings.
*/
- svm_pmap_activate(svm_sc, vcpu, pmap);
+ svm_pmap_activate(vcpu, pmap);
ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
vcpu->dirty = 0;
@@ -2160,7 +2150,7 @@
}
static void
-svm_vcpu_cleanup(void *arg, void *vcpui)
+svm_vcpu_cleanup(void *vcpui)
{
struct svm_vcpu *vcpu = vcpui;
@@ -2169,9 +2159,9 @@
}
static void
-svm_cleanup(void *arg)
+svm_cleanup(void *vmi)
{
- struct svm_softc *sc = arg;
+ struct svm_softc *sc = vmi;
contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
@@ -2225,20 +2215,18 @@
}
static int
-svm_getreg(void *arg, void *vcpui, int ident, uint64_t *val)
+svm_getreg(void *vcpui, int ident, uint64_t *val)
{
- struct svm_softc *svm_sc;
struct svm_vcpu *vcpu;
register_t *reg;
- svm_sc = arg;
vcpu = vcpui;
if (ident == VM_REG_GUEST_INTR_SHADOW) {
- return (svm_get_intr_shadow(svm_sc, vcpu, val));
+ return (svm_get_intr_shadow(vcpu, val));
}
- if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
+ if (vmcb_read(vcpu, ident, val) == 0) {
return (0);
}
@@ -2249,28 +2237,26 @@
return (0);
}
- VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_getreg: unknown register %#x",
- ident);
+ VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+ "svm_getreg: unknown register %#x", ident);
return (EINVAL);
}
static int
-svm_setreg(void *arg, void *vcpui, int ident, uint64_t val)
+svm_setreg(void *vcpui, int ident, uint64_t val)
{
- struct svm_softc *svm_sc;
struct svm_vcpu *vcpu;
register_t *reg;
- svm_sc = arg;
vcpu = vcpui;
if (ident == VM_REG_GUEST_INTR_SHADOW) {
- return (svm_modify_intr_shadow(svm_sc, vcpu, val));
+ return (svm_modify_intr_shadow(vcpu, val));
}
/* Do not permit user write access to VMCB fields by offset. */
if (!VMCB_ACCESS_OK(ident)) {
- if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+ if (vmcb_write(vcpu, ident, val) == 0) {
return (0);
}
}
@@ -2293,33 +2279,32 @@
* whether 'running' is true/false.
*/
- VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_setreg: unknown register %#x",
- ident);
+ VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+ "svm_setreg: unknown register %#x", ident);
return (EINVAL);
}
static int
-svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+svm_getdesc(void *vcpui, int reg, struct seg_desc *desc)
{
- return (vmcb_getdesc(arg, vcpui, reg, desc));
+ return (vmcb_getdesc(vcpui, reg, desc));
}
static int
-svm_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+svm_setdesc(void *vcpui, int reg, struct seg_desc *desc)
{
- return (vmcb_setdesc(arg, vcpui, reg, desc));
+ return (vmcb_setdesc(vcpui, reg, desc));
}
#ifdef BHYVE_SNAPSHOT
static int
-svm_snapshot_reg(void *arg, void *vcpui, int ident,
- struct vm_snapshot_meta *meta)
+svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta)
{
int ret;
uint64_t val;
if (meta->op == VM_SNAPSHOT_SAVE) {
- ret = svm_getreg(arg, vcpui, ident, &val);
+ ret = svm_getreg(vcpui, ident, &val);
if (ret != 0)
goto done;
@@ -2327,7 +2312,7 @@
} else if (meta->op == VM_SNAPSHOT_RESTORE) {
SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
- ret = svm_setreg(arg, vcpui, ident, val);
+ ret = svm_setreg(vcpui, ident, val);
if (ret != 0)
goto done;
} else {
@@ -2341,23 +2326,22 @@
#endif
static int
-svm_setcap(void *arg, void *vcpui, int type, int val)
+svm_setcap(void *vcpui, int type, int val)
{
- struct svm_softc *sc;
struct svm_vcpu *vcpu;
struct vlapic *vlapic;
int error;
- sc = arg;
vcpu = vcpui;
error = 0;
+
switch (type) {
case VM_CAP_HALT_EXIT:
- svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+ svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
VMCB_INTCPT_HLT, val);
break;
case VM_CAP_PAUSE_EXIT:
- svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+ svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
VMCB_INTCPT_PAUSE, val);
break;
case VM_CAP_UNRESTRICTED_GUEST:
@@ -2366,7 +2350,7 @@
error = EINVAL;
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
vlapic->ipi_exit = val;
break;
default:
@@ -2377,31 +2361,29 @@
}
static int
-svm_getcap(void *arg, void *vcpui, int type, int *retval)
+svm_getcap(void *vcpui, int type, int *retval)
{
- struct svm_softc *sc;
struct svm_vcpu *vcpu;
struct vlapic *vlapic;
int error;
- sc = arg;
vcpu = vcpui;
error = 0;
switch (type) {
case VM_CAP_HALT_EXIT:
- *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+ *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
VMCB_INTCPT_HLT);
break;
case VM_CAP_PAUSE_EXIT:
- *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+ *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
VMCB_INTCPT_PAUSE);
break;
case VM_CAP_UNRESTRICTED_GUEST:
*retval = 1; /* unrestricted guest is always enabled */
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
*retval = vlapic->ipi_exit;
break;
default:
@@ -2424,16 +2406,14 @@
}
static struct vlapic *
-svm_vlapic_init(void *arg, void *vcpui)
+svm_vlapic_init(void *vcpui)
{
- struct svm_softc *svm_sc;
struct svm_vcpu *vcpu;
struct vlapic *vlapic;
- svm_sc = arg;
vcpu = vcpui;
vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
- vlapic->vm = svm_sc->vm;
+ vlapic->vm = vcpu->sc->vm;
vlapic->vcpuid = vcpu->vcpuid;
vlapic->apic_page = malloc(PAGE_SIZE, M_SVM_VLAPIC, M_WAITOK | M_ZERO);
@@ -2443,7 +2423,7 @@
}
static void
-svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
+svm_vlapic_cleanup(struct vlapic *vlapic)
{
vlapic_cleanup(vlapic);
@@ -2453,7 +2433,7 @@
#ifdef BHYVE_SNAPSHOT
static int
-svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
+svm_snapshot(void *vmi, struct vm_snapshot_meta *meta)
{
if (meta->op == VM_SNAPSHOT_RESTORE)
flush_by_asid();
@@ -2462,163 +2442,159 @@
}
static int
-svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
+svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
- struct svm_softc *sc;
struct svm_vcpu *vcpu;
int err, running, hostcpu;
- sc = (struct svm_softc *)arg;
vcpu = vcpui;
err = 0;
- KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
-
- running = vcpu_is_running(sc->vm, vcpu->vcpuid, &hostcpu);
+ running = vcpu_is_running(vcpu->sc->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu) {
- printf("%s: %s%d is running", __func__, vm_name(sc->vm),
+ printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
vcpu->vcpuid);
return (EINVAL);
}
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR6, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta);
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);
/* Guest segments */
/* ES */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);
/* CS */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
/* SS */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);
/* DS */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);
/* FS */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);
/* GS */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);
/* TR */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);
/* LDTR */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);
/* EFER */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);
/* IDTR and GDTR */
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta);
- err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
+ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);
/* Specific AMD registers */
- err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
+ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
- err += vmcb_snapshot_any(sc, vcpu,
+ err += vmcb_snapshot_any(vcpu,
VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
if (err != 0)
goto done;
@@ -2676,11 +2652,12 @@
}
static int
-svm_restore_tsc(void *arg, void *vcpui, uint64_t offset)
+svm_restore_tsc(void *vcpui, uint64_t offset)
{
+ struct svm_vcpu *vcpu = vcpui;
int err;
- err = svm_set_tsc_offset(arg, vcpui, offset);
+ err = svm_set_tsc_offset(vcpu->sc, vcpu, offset);
return (err);
}
diff --git a/sys/amd64/vmm/amd/svm_softc.h b/sys/amd64/vmm/amd/svm_softc.h
--- a/sys/amd64/vmm/amd/svm_softc.h
+++ b/sys/amd64/vmm/amd/svm_softc.h
@@ -36,12 +36,15 @@
#define SVM_IO_BITMAP_SIZE (3 * PAGE_SIZE)
#define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE)
+struct svm_softc;
+
struct asid {
uint64_t gen; /* range is [1, ~0UL] */
uint32_t num; /* range is [1, nasid - 1] */
};
struct svm_vcpu {
+ struct svm_softc *sc;
struct vmcb *vmcb; /* hardware saved vcpu context */
struct svm_regctx swctx; /* software saved vcpu context */
uint64_t vmcb_pa; /* VMCB physical address */
diff --git a/sys/amd64/vmm/amd/vmcb.h b/sys/amd64/vmm/amd/vmcb.h
--- a/sys/amd64/vmm/amd/vmcb.h
+++ b/sys/amd64/vmm/amd/vmcb.h
@@ -354,23 +354,17 @@
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
-int vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t *retval);
-int vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t val);
-int vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- struct seg_desc *desc);
-int vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- struct seg_desc *desc);
+int vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval);
+int vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val);
+int vmcb_setdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
+int vmcb_getdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
int vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
#ifdef BHYVE_SNAPSHOT
-int vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t *val);
-int vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t val);
-int vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+int vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val);
+int vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val);
+int vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
struct vm_snapshot_meta *meta);
-int vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu*vcpu, int ident,
+int vmcb_snapshot_any(struct svm_vcpu*vcpu, int ident,
struct vm_snapshot_meta *meta);
#endif
diff --git a/sys/amd64/vmm/amd/vmcb.c b/sys/amd64/vmm/amd/vmcb.c
--- a/sys/amd64/vmm/amd/vmcb.c
+++ b/sys/amd64/vmm/amd/vmcb.c
@@ -116,8 +116,7 @@
}
static int
-vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
- int ident, uint64_t *val)
+vmcb_access(struct svm_vcpu *vcpu, int write, int ident, uint64_t *val)
{
struct vmcb *vmcb;
int off, bytes;
@@ -146,7 +145,7 @@
memcpy(val, ptr + off, bytes);
break;
default:
- VCPU_CTR1(softc->vm, vcpu->vcpuid,
+ VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
"Invalid size %d for VMCB access: %d", bytes);
return (EINVAL);
}
@@ -162,8 +161,7 @@
* Read from segment selector, control and general purpose register of VMCB.
*/
int
-vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t *retval)
+vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval)
{
struct vmcb *vmcb;
struct vmcb_state *state;
@@ -175,7 +173,7 @@
err = 0;
if (VMCB_ACCESS_OK(ident))
- return (vmcb_access(sc, vcpu, 0, ident, retval));
+ return (vmcb_access(vcpu, 0, ident, retval));
switch (ident) {
case VM_REG_GUEST_CR0:
@@ -253,7 +251,7 @@
* Write to segment selector, control and general purpose register of VMCB.
*/
int
-vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident, uint64_t val)
+vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
struct vmcb *vmcb;
struct vmcb_state *state;
@@ -266,7 +264,7 @@
err = 0;
if (VMCB_ACCESS_OK(ident))
- return (vmcb_access(sc, vcpu, 1, ident, &val));
+ return (vmcb_access(vcpu, 1, ident, &val));
switch (ident) {
case VM_REG_GUEST_CR0:
@@ -366,8 +364,7 @@
}
int
-vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
- struct seg_desc *desc)
+vmcb_setdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
struct vmcb *vmcb;
struct vmcb_segment *seg;
@@ -395,8 +392,9 @@
seg->attrib = attrib;
}
- VCPU_CTR4(sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), limit (%#x), "
- "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
+ VCPU_CTR4(vcpu->sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), "
+ "limit (%#x), attrib (%#x)", reg, seg->base, seg->limit,
+ seg->attrib);
switch (reg) {
case VM_REG_GUEST_CS:
@@ -417,8 +415,7 @@
}
int
-vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
- struct seg_desc *desc)
+vmcb_getdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
{
struct vmcb *vmcb;
struct vmcb_segment *seg;
@@ -458,8 +455,7 @@
#ifdef BHYVE_SNAPSHOT
int
-vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t *val)
+vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val)
{
int error = 0;
@@ -468,15 +464,14 @@
goto err;
}
- error = vmcb_read(sc, vcpu, ident, val);
+ error = vmcb_read(vcpu, ident, val);
err:
return (error);
}
int
-vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- uint64_t val)
+vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val)
{
int error = 0;
@@ -485,21 +480,21 @@
goto err;
}
- error = vmcb_write(sc, vcpu, ident, val);
+ error = vmcb_write(vcpu, ident, val);
err:
return (error);
}
int
-vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
struct vm_snapshot_meta *meta)
{
int ret;
struct seg_desc desc;
if (meta->op == VM_SNAPSHOT_SAVE) {
- ret = vmcb_getdesc(sc, vcpu, reg, &desc);
+ ret = vmcb_getdesc(vcpu, reg, &desc);
if (ret != 0)
goto done;
@@ -511,7 +506,7 @@
SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
- ret = vmcb_setdesc(sc, vcpu, reg, &desc);
+ ret = vmcb_setdesc(vcpu, reg, &desc);
if (ret != 0)
goto done;
} else {
@@ -524,14 +519,14 @@
}
int
-vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
- struct vm_snapshot_meta *meta)
+vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
+ struct vm_snapshot_meta *meta)
{
int ret;
uint64_t val;
if (meta->op == VM_SNAPSHOT_SAVE) {
- ret = vmcb_getany(sc, vcpu, ident, &val);
+ ret = vmcb_getany(vcpu, ident, &val);
if (ret != 0)
goto done;
@@ -539,7 +534,7 @@
} else if (meta->op == VM_SNAPSHOT_RESTORE) {
SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
- ret = vmcb_setany(sc, vcpu, ident, val);
+ ret = vmcb_setany(vcpu, ident, val);
if (ret != 0)
goto done;
} else {
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -38,6 +38,7 @@
#include "x86.h"
struct pmap;
+struct vmx;
struct vmxctx {
register_t guest_rdi; /* Guest state */
@@ -126,6 +127,7 @@
};
struct vmx_vcpu {
+ struct vmx *vmx;
struct vmcs *vmcs;
struct apic_page *apic_page;
struct pir_desc *pir_desc;
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -308,12 +308,12 @@
*/
#define APIC_ACCESS_ADDRESS 0xFFFFF000
-static int vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
-static int vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval);
+static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc);
+static int vmx_getreg(void *vcpui, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);
#ifdef BHYVE_SNAPSHOT
-static int vmx_restore_tsc(void *arg, void *vcpui, uint64_t now);
+static int vmx_restore_tsc(void *vcpui, uint64_t now);
#endif
static inline bool
@@ -1107,15 +1107,16 @@
}
static void *
-vmx_vcpu_init(void *arg, int vcpuid)
+vmx_vcpu_init(void *vmi, int vcpuid)
{
- struct vmx *vmx = arg;
+ struct vmx *vmx = vmi;
struct vmcs *vmcs;
struct vmx_vcpu *vcpu;
uint32_t exc_bitmap;
int error;
vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO);
+ vcpu->vmx = vmx;
vcpu->vcpuid = vcpuid;
vcpu->vmcs = malloc(sizeof(*vmcs), M_VMX, M_WAITOK | M_ZERO);
vcpu->apic_page = malloc(PAGE_SIZE, M_VMX, M_WAITOK | M_ZERO);
@@ -1230,30 +1231,31 @@
}
static __inline void
-vmx_run_trace(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_run_trace(struct vmx_vcpu *vcpu)
{
#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
+ VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
vmcs_guest_rip());
#endif
}
static __inline void
-vmx_exit_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip,
- uint32_t exit_reason, int handled)
+vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason,
+ int handled)
{
#ifdef KTR
- VCPU_CTR3(vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
+ VCPU_CTR3(vcpu->vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
handled ? "handled" : "unhandled",
exit_reason_to_str(exit_reason), rip);
#endif
}
static __inline void
-vmx_astpending_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip)
+vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip)
{
#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx", rip);
+ VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx",
+ rip);
#endif
}
@@ -1345,48 +1347,50 @@
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
static void __inline
-vmx_set_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_set_int_window_exiting(struct vmx_vcpu *vcpu)
{
if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu->vcpuid,
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
"Enabling interrupt window exiting");
}
}
static void __inline
-vmx_clear_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu)
{
KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls));
vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling interrupt window exiting");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
+ "Disabling interrupt window exiting");
}
static void __inline
-vmx_set_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu)
{
if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling NMI window exiting");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
+ "Enabling NMI window exiting");
}
}
static void __inline
-vmx_clear_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu)
{
KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls));
vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
}
int
@@ -1513,7 +1517,7 @@
}
if (need_nmi_exiting)
- vmx_set_nmi_window_exiting(vmx, vcpu);
+ vmx_set_nmi_window_exiting(vcpu);
}
extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid);
@@ -1613,7 +1617,7 @@
* as soon as possible. This applies both for the software
* emulated vlapic and the hardware assisted virtual APIC.
*/
- vmx_set_int_window_exiting(vmx, vcpu);
+ vmx_set_int_window_exiting(vcpu);
}
VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Injecting hwintr at vector %d",
@@ -1626,7 +1630,7 @@
* Set the Interrupt Window Exiting execution control so we can inject
* the interrupt as soon as blocking condition goes away.
*/
- vmx_set_int_window_exiting(vmx, vcpu);
+ vmx_set_int_window_exiting(vcpu);
}
/*
@@ -1639,29 +1643,29 @@
* hypervisor needs to restore virtual-NMI blocking before resuming the guest.
*/
static void
-vmx_restore_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu)
{
uint32_t gi;
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static void
-vmx_clear_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu)
{
uint32_t gi;
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static void
-vmx_assert_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu)
{
uint32_t gi __diagused;
@@ -1986,26 +1990,26 @@
}
static uint64_t
-inout_str_index(struct vmx *vmx, struct vmx_vcpu *vcpu, int in)
+inout_str_index(struct vmx_vcpu *vcpu, int in)
{
uint64_t val;
int error __diagused;
enum vm_reg_name reg;
reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
- error = vmx_getreg(vmx, vcpu, reg, &val);
+ error = vmx_getreg(vcpu, reg, &val);
KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
return (val);
}
static uint64_t
-inout_str_count(struct vmx *vmx, struct vmx_vcpu *vcpu, int rep)
+inout_str_count(struct vmx_vcpu *vcpu, int rep)
{
uint64_t val;
int error __diagused;
if (rep) {
- error = vmx_getreg(vmx, vcpu, VM_REG_GUEST_RCX, &val);
+ error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val);
KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
} else {
val = 1;
@@ -2032,8 +2036,8 @@
}
static void
-inout_str_seginfo(struct vmx *vmx, struct vmx_vcpu *vcpu, uint32_t inst_info,
- int in, struct vm_inout_str *vis)
+inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in,
+ struct vm_inout_str *vis)
{
int error __diagused, s;
@@ -2044,7 +2048,7 @@
vis->seg_name = vm_segment_name(s);
}
- error = vmx_getdesc(vmx, vcpu, vis->seg_name, &vis->seg_desc);
+ error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
}
@@ -2430,9 +2434,9 @@
intr_type = idtvec_info & VMCS_INTR_T_MASK;
if (intr_type == VMCS_INTR_T_NMI) {
if (reason != EXIT_REASON_TASK_SWITCH)
- vmx_clear_nmi_blocking(vmx, vcpu);
+ vmx_clear_nmi_blocking(vcpu);
else
- vmx_assert_nmi_blocking(vmx, vcpu);
+ vmx_assert_nmi_blocking(vcpu);
}
/*
@@ -2573,7 +2577,7 @@
case EXIT_REASON_INTR_WINDOW:
vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1);
SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
- vmx_clear_int_window_exiting(vmx, vcpu);
+ vmx_clear_int_window_exiting(vcpu);
return (1);
case EXIT_REASON_EXT_INTR:
/*
@@ -2611,7 +2615,7 @@
/* Exit to allow the pending virtual NMI to be injected */
if (vm_nmi_pending(vmx->vm, vcpuid))
vmx_inject_nmi(vmx, vcpu);
- vmx_clear_nmi_window_exiting(vmx, vcpu);
+ vmx_clear_nmi_window_exiting(vcpu);
vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1);
return (1);
case EXIT_REASON_INOUT:
@@ -2630,10 +2634,10 @@
vmx_paging_info(&vis->paging);
vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
- vis->index = inout_str_index(vmx, vcpu, in);
- vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
+ vis->index = inout_str_index(vcpu, in);
+ vis->count = inout_str_count(vcpu, vis->inout.rep);
vis->addrsize = inout_str_addrsize(inst_info);
- inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
+ inout_str_seginfo(vcpu, inst_info, in, vis);
}
SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
break;
@@ -2663,7 +2667,7 @@
if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
(intr_vec != IDT_DF) &&
(intr_info & EXIT_QUAL_NMIUDTI) != 0)
- vmx_restore_nmi_blocking(vmx, vcpu);
+ vmx_restore_nmi_blocking(vcpu);
/*
* The NMI has already been handled in vmx_exit_handle_nmi().
@@ -2756,7 +2760,7 @@
*/
if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
(qual & EXIT_QUAL_NMIUDTI) != 0)
- vmx_restore_nmi_blocking(vmx, vcpu);
+ vmx_restore_nmi_blocking(vcpu);
break;
case EXIT_REASON_VIRTUALIZED_EOI:
vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
@@ -2893,8 +2897,7 @@
* clear NMI blocking.
*/
static __inline void
-vmx_exit_handle_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu,
- struct vm_exit *vmexit)
+vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
uint32_t intr_info;
@@ -2910,7 +2913,8 @@
if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
"to NMI has invalid vector: %#x", intr_info));
- VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Vectoring to NMI handler");
+ VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
+ "Vectoring to NMI handler");
__asm __volatile("int $2");
}
}
@@ -3008,8 +3012,7 @@
}
static int
-vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
- struct vm_eventinfo *evinfo)
+vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
int rc, handled, launched, vcpuid;
struct vmx *vmx;
@@ -3023,9 +3026,9 @@
struct region_descriptor gdtr, idtr;
uint16_t ldt_sel;
- vmx = arg;
+ vcpu = vcpui;
+ vmx = vcpu->vmx;
vm = vmx->vm;
- vcpu = vcpui;
vcpuid = vcpu->vcpuid;
vmcs = vcpu->vmcs;
vmxctx = &vcpu->ctx;
@@ -3104,7 +3107,7 @@
if (vcpu_should_yield(vm, vcpuid)) {
enable_intr();
vm_exit_astpending(vmx->vm, vcpuid, rip);
- vmx_astpending_trace(vmx, vcpu, rip);
+ vmx_astpending_trace(vcpu, rip);
handled = HANDLED;
break;
}
@@ -3166,7 +3169,7 @@
*/
vmx_pmap_activate(vmx, pmap);
- vmx_run_trace(vmx, vcpu);
+ vmx_run_trace(vcpu);
rc = vmx_enter_guest(vmxctx, vmx, launched);
vmx_pmap_deactivate(vmx, pmap);
@@ -3187,7 +3190,7 @@
vcpu->state.nextrip = rip;
if (rc == VMX_GUEST_VMEXIT) {
- vmx_exit_handle_nmi(vmx, vcpu, vmexit);
+ vmx_exit_handle_nmi(vcpu, vmexit);
enable_intr();
handled = vmx_exit_process(vmx, vcpu, vmexit);
} else {
@@ -3195,7 +3198,7 @@
vmx_exit_inst_error(vmxctx, rc, vmexit);
}
launched = 1;
- vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
+ vmx_exit_trace(vcpu, rip, exit_reason, handled);
rip = vmexit->rip;
} while (handled);
@@ -3219,7 +3222,7 @@
}
static void
-vmx_vcpu_cleanup(void *arg, void *vcpui)
+vmx_vcpu_cleanup(void *vcpui)
{
struct vmx_vcpu *vcpu = vcpui;
@@ -3231,9 +3234,9 @@
}
static void
-vmx_cleanup(void *arg)
+vmx_cleanup(void *vmi)
{
- struct vmx *vmx = arg;
+ struct vmx *vmx = vmi;
if (virtual_interrupt_delivery)
vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
@@ -3334,8 +3337,7 @@
}
static int
-vmx_modify_intr_shadow(struct vmx *vmx, struct vmx_vcpu *vcpu, int running,
- uint64_t val)
+vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val)
{
struct vmcs *vmcs;
uint64_t gi;
@@ -3357,8 +3359,8 @@
error = vmcs_setreg(vmcs, running, ident, gi);
}
done:
- VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s", val,
- error ? "failed" : "succeeded");
+ VCPU_CTR2(vcpu->vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s",
+ val, error ? "failed" : "succeeded");
return (error);
}
@@ -3384,11 +3386,11 @@
}
static int
-vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval)
+vmx_getreg(void *vcpui, int reg, uint64_t *retval)
{
int running, hostcpu;
- struct vmx *vmx = arg;
struct vmx_vcpu *vcpu = vcpui;
+ struct vmx *vmx = vcpu->vmx;
running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
@@ -3405,13 +3407,13 @@
}
static int
-vmx_setreg(void *arg, void *vcpui, int reg, uint64_t val)
+vmx_setreg(void *vcpui, int reg, uint64_t val)
{
int error, hostcpu, running, shadow;
uint64_t ctls;
pmap_t pmap;
- struct vmx *vmx = arg;
struct vmx_vcpu *vcpu = vcpui;
+ struct vmx *vmx = vcpu->vmx;
running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
@@ -3419,7 +3421,7 @@
vcpu->vcpuid);
if (reg == VM_REG_GUEST_INTR_SHADOW)
- return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+ return (vmx_modify_intr_shadow(vcpu, running, val));
if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0)
return (0);
@@ -3474,11 +3476,11 @@
}
static int
-vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc)
{
int hostcpu, running;
- struct vmx *vmx = arg;
struct vmx_vcpu *vcpu = vcpui;
+ struct vmx *vmx = vcpu->vmx;
running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
@@ -3489,11 +3491,11 @@
}
static int
-vmx_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc)
{
int hostcpu, running;
- struct vmx *vmx = arg;
struct vmx_vcpu *vcpu = vcpui;
+ struct vmx *vmx = vcpu->vmx;
running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
@@ -3504,7 +3506,7 @@
}
static int
-vmx_getcap(void *arg, void *vcpui, int type, int *retval)
+vmx_getcap(void *vcpui, int type, int *retval)
{
struct vmx_vcpu *vcpu = vcpui;
int vcap;
@@ -3558,9 +3560,8 @@
}
static int
-vmx_setcap(void *arg, void *vcpui, int type, int val)
+vmx_setcap(void *vcpui, int type, int val)
{
- struct vmx *vmx = arg;
struct vmx_vcpu *vcpu = vcpui;
struct vmcs *vmcs = vcpu->vmcs;
struct vlapic *vlapic;
@@ -3645,7 +3646,7 @@
case VM_CAP_IPI_EXIT:
retval = 0;
- vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vmx->vm, vcpu->vcpuid);
vlapic->ipi_exit = val;
break;
default:
@@ -3699,7 +3700,6 @@
struct vlapic_vtx {
struct vlapic vlapic;
struct pir_desc *pir_desc;
- struct vmx *vmx;
struct vmx_vcpu *vcpu;
u_int pending_prio;
};
@@ -3929,8 +3929,8 @@
int error __diagused;
vlapic_vtx = (struct vlapic_vtx *)vlapic;
- vmx = vlapic_vtx->vmx;
vcpu = vlapic_vtx->vcpu;
+ vmx = vcpu->vmx;
vmcs = vcpu->vmcs;
proc_ctls2 = vcpu->cap.proc_ctls2;
@@ -4066,15 +4066,15 @@
}
static struct vlapic *
-vmx_vlapic_init(void *arg, void *vcpui)
+vmx_vlapic_init(void *vcpui)
{
struct vmx *vmx;
struct vmx_vcpu *vcpu;
struct vlapic *vlapic;
struct vlapic_vtx *vlapic_vtx;
- vmx = arg;
vcpu = vcpui;
+ vmx = vcpu->vmx;
vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
vlapic->vm = vmx->vm;
@@ -4083,7 +4083,6 @@
vlapic_vtx = (struct vlapic_vtx *)vlapic;
vlapic_vtx->pir_desc = vcpu->pir_desc;
- vlapic_vtx->vmx = vmx;
vlapic_vtx->vcpu = vcpu;
if (tpr_shadowing) {
@@ -4107,7 +4106,7 @@
}
static void
-vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
+vmx_vlapic_cleanup(struct vlapic *vlapic)
{
vlapic_cleanup(vlapic);
@@ -4116,13 +4115,13 @@
#ifdef BHYVE_SNAPSHOT
static int
-vmx_snapshot(void *arg, struct vm_snapshot_meta *meta)
+vmx_snapshot(void *vmi, struct vm_snapshot_meta *meta)
{
return (0);
}
static int
-vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
+vmx_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
struct vmcs *vmcs;
struct vmx *vmx;
@@ -4130,11 +4129,9 @@
struct vmxctx *vmxctx;
int err, run, hostcpu;
- vmx = (struct vmx *)arg;
- vcpu = vcpui;
err = 0;
-
- KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
+ vcpu = vcpui;
+ vmx = vcpu->vmx;
vmcs = vcpu->vmcs;
run = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
@@ -4230,14 +4227,14 @@
}
static int
-vmx_restore_tsc(void *arg, void *vcpui, uint64_t offset)
+vmx_restore_tsc(void *vcpui, uint64_t offset)
{
- struct vmcs *vmcs;
- struct vmx *vmx = (struct vmx *)arg;
struct vmx_vcpu *vcpu = vcpui;
+ struct vmcs *vmcs;
+ struct vmx *vmx;
int error, running, hostcpu;
- KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
+ vmx = vcpu->vmx;
vmcs = vcpu->vmcs;
running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -209,32 +209,27 @@
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, void *vcpui, register_t rip,
- struct pmap *pmap, struct vm_eventinfo *info))
+DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
+ struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, int vcpu_id))
-DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vmi, void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, void *vcpui, int num,
- uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, void *vcpui, int num,
- uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, void *vcpui, int num,
- struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, void *vcpui, int num,
- struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, void *vcpui, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, void *vcpui, int num, int val))
+DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
+DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
+DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
+DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
+DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
+DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, void *vcpui))
-DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (void *vmi, struct vlapic *vlapic))
+DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
+DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
-DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta
- *meta))
-DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vmi, struct vm_snapshot_meta
- *meta, void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, void *vcpui, uint64_t now))
+DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
+DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
+ struct vm_snapshot_meta *meta))
+DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
#endif
#define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
@@ -307,8 +302,8 @@
{
struct vcpu *vcpu = &vm->vcpu[i];
- vmmops_vlapic_cleanup(vm->cookie, vcpu->vlapic);
- vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
+ vmmops_vlapic_cleanup(vcpu->vlapic);
+ vmmops_vcpu_cleanup(vcpu->cookie);
vcpu->cookie = NULL;
if (destroy) {
vmm_stat_free(vcpu->stats);
@@ -338,7 +333,7 @@
}
vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);
- vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu->cookie);
+ vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
vcpu->reqidle = 0;
vcpu->exitintinfo = 0;
@@ -1082,8 +1077,7 @@
if (reg >= VM_REG_LAST)
return (EINVAL);
- return (vmmops_getreg(vm->cookie, vcpu_cookie(vm, vcpu), reg,
- retval));
+ return (vmmops_getreg(vcpu_cookie(vm, vcpu), reg, retval));
}
int
@@ -1099,7 +1093,7 @@
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
- error = vmmops_setreg(vm->cookie, vcpu->cookie, reg, val);
+ error = vmmops_setreg(vcpu->cookie, reg, val);
if (error || reg != VM_REG_GUEST_RIP)
return (error);
@@ -1152,7 +1146,7 @@
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
- return (vmmops_getdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
+ return (vmmops_getdesc(vcpu_cookie(vm, vcpu), reg, desc));
}
int
@@ -1165,7 +1159,7 @@
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
- return (vmmops_setdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
+ return (vmmops_setdesc(vcpu_cookie(vm, vcpu), reg, desc));
}
static void
@@ -1780,8 +1774,7 @@
restore_guest_fpustate(vcpu);
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
- error = vmmops_run(vm->cookie, vcpu->cookie, vcpu->nextrip, pmap,
- &evinfo);
+ error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
save_guest_fpustate(vcpu);
@@ -2287,7 +2280,7 @@
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
- return (vmmops_getcap(vm->cookie, vcpu_cookie(vm, vcpu), type, retval));
+ return (vmmops_getcap(vcpu_cookie(vm, vcpu), type, retval));
}
int
@@ -2299,7 +2292,7 @@
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
- return (vmmops_setcap(vm->cookie, vcpu_cookie(vm, vcpu), type, val));
+ return (vmmops_setcap(vcpu_cookie(vm, vcpu), type, val));
}
struct vlapic *
@@ -2876,7 +2869,7 @@
for (i = 0; i < maxcpus; i++) {
vcpu = &vm->vcpu[i];
- error = vmmops_vcpu_snapshot(vm->cookie, meta, vcpu->cookie);
+ error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
if (error != 0) {
printf("%s: failed to snapshot vmcs/vmcb data for "
"vCPU: %d; error: %d\n", __func__, i, error);
@@ -2967,7 +2960,7 @@
for (i = 0; i < maxcpus; i++) {
vcpu = &vm->vcpu[i];
- error = vmmops_restore_tsc(vm->cookie, vcpu->cookie,
+ error = vmmops_restore_tsc(vcpu->cookie,
vcpu->tsc_offset - now);
if (error)
return (error);