diff --git a/lib/libvmmapi/internal.h b/lib/libvmmapi/internal.h --- a/lib/libvmmapi/internal.h +++ b/lib/libvmmapi/internal.h @@ -12,6 +12,7 @@ struct vcpu { struct vmctx *ctx; int vcpuid; + cpuset_t dmask; }; #endif /* !__VMMAPI_INTERNAL_H__ */ diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h --- a/lib/libvmmapi/vmmapi.h +++ b/lib/libvmmapi/vmmapi.h @@ -126,6 +126,7 @@ struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid); void vm_vcpu_close(struct vcpu *vcpu); int vcpu_id(struct vcpu *vcpu); +cpuset_t *vcpu_dmask(struct vcpu *vcpu); int vm_parse_memsize(const char *optarg, size_t *memsize); int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s); void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len); diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c --- a/lib/libvmmapi/vmmapi.c +++ b/lib/libvmmapi/vmmapi.c @@ -172,6 +172,7 @@ vcpu = malloc(sizeof(*vcpu)); vcpu->ctx = ctx; vcpu->vcpuid = vcpuid; + CPU_ZERO(&vcpu->dmask); return (vcpu); } @@ -187,6 +188,12 @@ return (vcpu->vcpuid); } +cpuset_t * +vcpu_dmask(struct vcpu *vcpu) +{ + return (&vcpu->dmask); +} + int vm_parse_memsize(const char *opt, size_t *ret_memsize) { @@ -727,6 +734,8 @@ struct vm_run vmrun; bzero(&vmrun, sizeof(vmrun)); + vmrun.dmask = vcpu_dmask(vcpu); + vmrun.dmasksize = sizeof(cpuset_t); error = vcpu_ioctl(vcpu, VM_RUN, &vmrun); bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit)); diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h --- a/sys/amd64/include/vmm.h +++ b/sys/amd64/include/vmm.h @@ -283,6 +283,7 @@ void vm_extint_clear(struct vcpu *vcpu); int vcpu_vcpuid(struct vcpu *vcpu); struct vm *vcpu_vm(struct vcpu *vcpu); +cpuset_t *vcpu_dmask(struct vcpu *vcpu); struct vcpu *vm_vcpu(struct vm *vm, int cpu); struct vlapic *vm_lapic(struct vcpu *vcpu); struct vioapic *vm_ioapic(struct vm *vm); @@ -756,7 +757,7 @@ struct { uint32_t mode; uint8_t vector; - cpuset_t dmask; + __BITSET_DEFINE(, 256) dmask; } ipi; struct 
vm_task_switch task_switch; } u; diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h --- a/sys/amd64/include/vmm_dev.h +++ b/sys/amd64/include/vmm_dev.h @@ -90,6 +90,8 @@ struct vm_run { int cpuid; struct vm_exit vm_exit; + size_t dmasksize; + cpuset_t *dmask; }; struct vm_exception { diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c --- a/sys/amd64/vmm/io/vlapic.c +++ b/sys/amd64/vmm/io/vlapic.c @@ -1037,7 +1037,7 @@ { int i; bool phys; - cpuset_t dmask, ipimask; + cpuset_t dmask, *ipimask; uint64_t icrval; uint32_t dest, vec, mode, shorthand; struct vcpu *vcpu; @@ -1089,7 +1089,8 @@ * ipimask is a set of vCPUs needing userland handling of the current * IPI. */ - CPU_ZERO(&ipimask); + ipimask = vcpu_dmask(vlapic->vcpu); + CPU_ZERO(ipimask); switch (mode) { case APIC_DELMODE_FIXED: @@ -1130,23 +1131,22 @@ i == vlapic->vcpuid) break; - CPU_SETOF(i, &ipimask); + CPU_SETOF(i, ipimask); break; } - CPU_COPY(&dmask, &ipimask); + CPU_COPY(&dmask, ipimask); break; default: return (1); } - if (!CPU_EMPTY(&ipimask)) { + if (!CPU_EMPTY(ipimask)) { vmexit = vm_exitinfo(vlapic->vcpu); vmexit->exitcode = VM_EXITCODE_IPI; vmexit->u.ipi.mode = mode; vmexit->u.ipi.vector = vec; - vmexit->u.ipi.dmask = ipimask; *retu = true; } @@ -1166,7 +1166,7 @@ vm_handle_ipi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { struct vlapic *vlapic = vm_lapic(vcpu); - cpuset_t *dmask = &vme->u.ipi.dmask; + cpuset_t *dmask = vcpu_dmask(vcpu); uint8_t vec = vme->u.ipi.vector; *retu = true; diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -125,6 +125,7 @@ struct vm_exit exitinfo; /* (x) exit reason and collateral */ uint64_t nextrip; /* (x) next instruction to execute */ uint64_t tsc_offset; /* (o) TSC offsetting */ + cpuset_t dmask; /* (x) destination mask of vmexit */ }; #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) @@ -2317,6 +2318,12 @@ return (vcpu->vm); } +cpuset_t * 
+vcpu_dmask(struct vcpu *vcpu) +{ + return (&vcpu->dmask); +} + int vcpu_vcpuid(struct vcpu *vcpu) { diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c --- a/sys/amd64/vmm/vmm_dev.c +++ b/sys/amd64/vmm/vmm_dev.c @@ -93,6 +93,13 @@ #define VM_SNAPSHOT_REQ_OLD \ _IOWR('v', IOCNUM_SNAPSHOT_REQ, struct vm_snapshot_meta_old) + +struct vm_run_old { + int cpuid; + struct vm_exit vm_exit; +}; + +#define VM_RUN_OLD _IOWR('v', IOCNUM_RUN, struct vm_run_old) #endif struct devmem_softc { @@ -395,6 +402,9 @@ struct vm_register *vmreg; struct vm_seg_desc *vmsegdesc; struct vm_register_set *vmregset; +#ifdef COMPAT_FREEBSD13 + struct vm_run_old *vmrun_old; +#endif struct vm_run *vmrun; struct vm_exception *vmexc; struct vm_lapic_irq *vmirq; @@ -458,6 +468,9 @@ * is the vcpuid. */ switch (cmd) { +#ifdef COMPAT_FREEBSD13 + case VM_RUN_OLD: +#endif case VM_RUN: case VM_GET_REGISTER: case VM_SET_REGISTER: @@ -580,9 +593,30 @@ } switch(cmd) { +#ifdef COMPAT_FREEBSD13 + case VM_RUN_OLD: + vmrun_old = (struct vm_run_old *)data; + error = vm_run(vcpu, &vmrun_old->vm_exit); + if (error == 0) { + if (vmrun_old->vm_exit.exitcode == VM_EXITCODE_IPI) { + memset(&vmrun_old->vm_exit.u.ipi.dmask, 0, + sizeof(vmrun_old->vm_exit.u.ipi.dmask)); + memcpy(&vmrun_old->vm_exit.u.ipi.dmask, + vcpu_dmask(vcpu), + min(sizeof(vmrun_old->vm_exit.u.ipi.dmask), + sizeof(cpuset_t))); + } + } + + break; +#endif case VM_RUN: vmrun = (struct vm_run *)data; error = vm_run(vcpu, &vmrun->vm_exit); + if (error == 0) { + error = copyout(vcpu_dmask(vcpu), vmrun->dmask, + min(vmrun->dmasksize, sizeof(cpuset_t))); + } break; case VM_SUSPEND: vmsuspend = (struct vm_suspend *)data; diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c --- a/usr.sbin/bhyve/bhyverun.c +++ b/usr.sbin/bhyve/bhyverun.c @@ -922,14 +922,13 @@ } static int -vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused, - struct vm_exit *vme) +vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme) { int 
error = -1; int i; switch (vme->u.ipi.mode) { case APIC_DELMODE_INIT: - CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) { + CPU_FOREACH_ISSET(i, vcpu_dmask(vcpu)) { error = vm_suspend_cpu(vcpu_info[i].vcpu); if (error) { warnx("%s: failed to suspend cpu %d\n", @@ -939,7 +938,7 @@ } break; case APIC_DELMODE_STARTUP: - CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) { + CPU_FOREACH_ISSET(i, vcpu_dmask(vcpu)) { spinup_ap(vcpu_info[i].vcpu, vme->u.ipi.vector << PAGE_SHIFT); }