Changeset View
Standalone View
sys/amd64/vmm/vmm.c
Show First 20 Lines • Show All 270 Lines • ▼ Show 20 Lines | vcpu_cleanup(struct vm *vm, int i, bool destroy) | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
vcpu_init(struct vm *vm, int vcpu_id, bool create) | vcpu_init(struct vm *vm, int vcpu_id, bool create) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU, | KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, | ||||
("vcpu_init: invalid vcpu %d", vcpu_id)); | ("vcpu_init: invalid vcpu %d", vcpu_id)); | ||||
vcpu = &vm->vcpu[vcpu_id]; | vcpu = &vm->vcpu[vcpu_id]; | ||||
if (create) { | if (create) { | ||||
KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already " | KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already " | ||||
"initialized", vcpu_id)); | "initialized", vcpu_id)); | ||||
vcpu_lock_init(vcpu); | vcpu_lock_init(vcpu); | ||||
Show All 22 Lines | vcpu_trace_exceptions(struct vm *vm, int vcpuid) | ||||
return (trace_guest_exceptions); | return (trace_guest_exceptions); | ||||
} | } | ||||
struct vm_exit * | struct vm_exit * | ||||
vm_exitinfo(struct vm *vm, int cpuid) | vm_exitinfo(struct vm *vm, int cpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (cpuid < 0 || cpuid >= VM_MAXCPU) | if (cpuid < 0 || cpuid >= vm->maxcpus) | ||||
panic("vm_exitinfo: invalid cpuid %d", cpuid); | panic("vm_exitinfo: invalid cpuid %d", cpuid); | ||||
vcpu = &vm->vcpu[cpuid]; | vcpu = &vm->vcpu[cpuid]; | ||||
return (&vcpu->exitinfo); | return (&vcpu->exitinfo); | ||||
} | } | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 96 Lines • ▼ Show 20 Lines | if (create) | ||||
vm->vrtc = vrtc_init(vm); | vm->vrtc = vrtc_init(vm); | ||||
CPU_ZERO(&vm->active_cpus); | CPU_ZERO(&vm->active_cpus); | ||||
CPU_ZERO(&vm->debug_cpus); | CPU_ZERO(&vm->debug_cpus); | ||||
vm->suspend = 0; | vm->suspend = 0; | ||||
CPU_ZERO(&vm->suspended_cpus); | CPU_ZERO(&vm->suspended_cpus); | ||||
for (i = 0; i < VM_MAXCPU; i++) | for (i = 0; i < vm->maxcpus; i++) | ||||
vcpu_init(vm, i, create); | vcpu_init(vm, i, create); | ||||
} | } | ||||
/* | /* | ||||
* The default CPU topology is a single thread per package. | * The default CPU topology is a single thread per package. | ||||
*/ | */ | ||||
u_int cores_per_package = 1; | u_int cores_per_package = 1; | ||||
u_int threads_per_core = 1; | u_int threads_per_core = 1; | ||||
Show All 21 Lines | vm_create(const char *name, struct vm **retvm) | ||||
vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); | vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); | ||||
strcpy(vm->name, name); | strcpy(vm->name, name); | ||||
vm->vmspace = vmspace; | vm->vmspace = vmspace; | ||||
mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); | mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); | ||||
vm->sockets = 1; | vm->sockets = 1; | ||||
vm->cores = cores_per_package; /* XXX backwards compatibility */ | vm->cores = cores_per_package; /* XXX backwards compatibility */ | ||||
vm->threads = threads_per_core; /* XXX backwards compatibility */ | vm->threads = threads_per_core; /* XXX backwards compatibility */ | ||||
vm->maxcpus = 0; /* XXX not implemented */ | vm->maxcpus = 0; /* XXX not implemented */ | ||||
jhb: I think you can drop this line. | |||||
rgrimesAuthorUnsubmitted Done Inline ActionsThis line is needed for other work that checks this 0 value to mean "we have not allocated" data structures. Also I didn't touch this in this review, so that would be a change beyond the purpose of this code review. rgrimes: This line is needed for other work that checks this 0 value to mean "we have not allocated"… | |||||
jhbUnsubmitted Done Inline ActionsUsually I would wait to include that until the change that actually uses it. As it is, it looks odd in this change. jhb: Usually I would wait to include that until the change that actually uses it. As it is, it… | |||||
rgrimesAuthorUnsubmitted Done Inline ActionsJohn, if you do NOT set this value here all the accessors used to replace VM_MAXCPU by the vm->maxcpus would get a value of 0 seriously breaking things. rgrimes: John, if you do NOT set this value here all the accessors used to replace VM_MAXCPU by the vm… | |||||
jhbUnsubmitted Done Inline ActionsHuh? The comment is about the 'maxcpus = 0' line that is pointless since the next line sets it to VM_MAXCPU. jhb: Huh? The comment is about the 'maxcpus = 0' line that is pointless since the next line sets it… | |||||
rgrimesAuthorUnsubmitted Done Inline ActionsLet me try again. The original line of code that sets this to 0 and has the comment not implemented is still correct, in the since I have no reason to change it at this time. BUT if I do not change the value stored in this (the following line) the code would break bhyve in a very bad way, so I choose to leave the line that shall become valid later, and have its comment removed when it "is implemented", ie initing this to 0 is the correct long term value, and added a temporary line that shall be later deleted when the code can handle the fact that vm->maxcpus might be 0 and some mallocs need to be done. Since you have accepted the code anyway we should probably just drop this issue and move forward as this all cleans up when the final version is committed. rgrimes: Let me try again. The original line of code that sets this to 0 and has the comment not… | |||||
vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ | |||||
Done Inline ActionsNuke the 0 assignment so that maxcpus is never in what would now be an illegal state? pmooney_pfmooney.com: Nuke the 0 assignment so that maxcpus is never in what would now be an illegal state? | |||||
Done Inline ActionsAgreed, I need to update the diff and upload again. rgrimes: Agreed, I need to update the diff and upload again. | |||||
vm_init(vm, true); | vm_init(vm, true); | ||||
*retvm = vm; | *retvm = vm; | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, | vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, | ||||
uint16_t *threads, uint16_t *maxcpus) | uint16_t *threads, uint16_t *maxcpus) | ||||
{ | { | ||||
*sockets = vm->sockets; | *sockets = vm->sockets; | ||||
*cores = vm->cores; | *cores = vm->cores; | ||||
*threads = vm->threads; | *threads = vm->threads; | ||||
*maxcpus = vm->maxcpus; | *maxcpus = vm->maxcpus; | ||||
} | } | ||||
uint16_t | |||||
vm_get_maxcpus(struct vm *vm) | |||||
{ | |||||
return (vm->maxcpus); | |||||
} | |||||
int | int | ||||
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, | vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, | ||||
uint16_t threads, uint16_t maxcpus) | uint16_t threads, uint16_t maxcpus) | ||||
{ | { | ||||
if (maxcpus != 0) | if (maxcpus != 0) | ||||
return (EINVAL); /* XXX remove when supported */ | return (EINVAL); /* XXX remove when supported */ | ||||
if ((sockets * cores * threads) > VM_MAXCPU) | if ((sockets * cores * threads) > vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
/* XXX need to check sockets * cores * threads == vCPU, how? */ | /* XXX need to check sockets * cores * threads == vCPU, how? */ | ||||
vm->sockets = sockets; | vm->sockets = sockets; | ||||
vm->cores = cores; | vm->cores = cores; | ||||
vm->threads = threads; | vm->threads = threads; | ||||
vm->maxcpus = maxcpus; | vm->maxcpus = maxcpus; | ||||
jhbUnsubmitted Done Inline ActionsI think you can drop this line, especially given the XXX on line 499 after the maxcpus != 0 check. jhb: I think you can drop this line, especially given the XXX on line 499 after the maxcpus != 0… | |||||
rgrimesAuthorUnsubmitted Done Inline ActionsThis and line 499 are handled in other code work, for now there is no need to change this here to make this patch do what it needs to do. rgrimes: This and line 499 are handled in other code work, for now there is no need to change this here… | |||||
vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */ | |||||
Done Inline ActionsNuke the = maxcpus assignment so that a smaller provided value won't temporarily put the vm in an illegal state? pmooney_pfmooney.com: Nuke the `= maxcpus` assignment so that a smaller provided value won't temporarily put the vm… | |||||
Done Inline ActionsAgreed, I should of done this yesterday right after the call when it was fresh in my mind, I had meant to nuke this as per the call discussion. rgrimes: Agreed, I should of done this yesterday right after the call when it was fresh in my mind, I… | |||||
return(0); | return(0); | ||||
} | } | ||||
static void | static void | ||||
vm_cleanup(struct vm *vm, bool destroy) | vm_cleanup(struct vm *vm, bool destroy) | ||||
{ | { | ||||
struct mem_map *mm; | struct mem_map *mm; | ||||
int i; | int i; | ||||
ppt_unassign_all(vm); | ppt_unassign_all(vm); | ||||
if (vm->iommu != NULL) | if (vm->iommu != NULL) | ||||
iommu_destroy_domain(vm->iommu); | iommu_destroy_domain(vm->iommu); | ||||
if (destroy) | if (destroy) | ||||
vrtc_cleanup(vm->vrtc); | vrtc_cleanup(vm->vrtc); | ||||
else | else | ||||
vrtc_reset(vm->vrtc); | vrtc_reset(vm->vrtc); | ||||
vpmtmr_cleanup(vm->vpmtmr); | vpmtmr_cleanup(vm->vpmtmr); | ||||
vatpit_cleanup(vm->vatpit); | vatpit_cleanup(vm->vatpit); | ||||
vhpet_cleanup(vm->vhpet); | vhpet_cleanup(vm->vhpet); | ||||
vatpic_cleanup(vm->vatpic); | vatpic_cleanup(vm->vatpic); | ||||
vioapic_cleanup(vm->vioapic); | vioapic_cleanup(vm->vioapic); | ||||
for (i = 0; i < VM_MAXCPU; i++) | for (i = 0; i < vm->maxcpus; i++) | ||||
vcpu_cleanup(vm, i, destroy); | vcpu_cleanup(vm, i, destroy); | ||||
VMCLEANUP(vm->cookie); | VMCLEANUP(vm->cookie); | ||||
/* | /* | ||||
* System memory is removed from the guest address space only when | * System memory is removed from the guest address space only when | ||||
* the VM is destroyed. This is because the mapping remains the same | * the VM is destroyed. This is because the mapping remains the same | ||||
* across VM reset. | * across VM reset. | ||||
▲ Show 20 Lines • Show All 418 Lines • ▼ Show 20 Lines | vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot, | ||||
vm_page_t m; | vm_page_t m; | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
/* | /* | ||||
* All vcpus are frozen by ioctls that modify the memory map | * All vcpus are frozen by ioctls that modify the memory map | ||||
* (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is | * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is | ||||
* guaranteed if at least one vcpu is in the VCPU_FROZEN state. | * guaranteed if at least one vcpu is in the VCPU_FROZEN state. | ||||
*/ | */ | ||||
int state; | int state; | ||||
KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d", | KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d", | ||||
__func__, vcpuid)); | __func__, vcpuid)); | ||||
for (i = 0; i < VM_MAXCPU; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (vcpuid != -1 && vcpuid != i) | if (vcpuid != -1 && vcpuid != i) | ||||
continue; | continue; | ||||
state = vcpu_get_state(vm, i, NULL); | state = vcpu_get_state(vm, i, NULL); | ||||
KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", | KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", | ||||
__func__, state)); | __func__, state)); | ||||
} | } | ||||
#endif | #endif | ||||
pageoff = gpa & PAGE_MASK; | pageoff = gpa & PAGE_MASK; | ||||
Show All 29 Lines | vm_gpa_release(void *cookie) | ||||
vm_page_unhold(m); | vm_page_unhold(m); | ||||
vm_page_unlock(m); | vm_page_unlock(m); | ||||
} | } | ||||
int | int | ||||
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval) | vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval) | ||||
{ | { | ||||
if (vcpu < 0 || vcpu >= VM_MAXCPU) | if (vcpu < 0 || vcpu >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (reg >= VM_REG_LAST) | if (reg >= VM_REG_LAST) | ||||
return (EINVAL); | return (EINVAL); | ||||
return (VMGETREG(vm->cookie, vcpu, reg, retval)); | return (VMGETREG(vm->cookie, vcpu, reg, retval)); | ||||
} | } | ||||
int | int | ||||
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val) | vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
int error; | int error; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (reg >= VM_REG_LAST) | if (reg >= VM_REG_LAST) | ||||
return (EINVAL); | return (EINVAL); | ||||
error = VMSETREG(vm->cookie, vcpuid, reg, val); | error = VMSETREG(vm->cookie, vcpuid, reg, val); | ||||
if (error || reg != VM_REG_GUEST_RIP) | if (error || reg != VM_REG_GUEST_RIP) | ||||
return (error); | return (error); | ||||
Show All 37 Lines | is_segment_register(int reg) | ||||
} | } | ||||
} | } | ||||
int | int | ||||
vm_get_seg_desc(struct vm *vm, int vcpu, int reg, | vm_get_seg_desc(struct vm *vm, int vcpu, int reg, | ||||
struct seg_desc *desc) | struct seg_desc *desc) | ||||
{ | { | ||||
if (vcpu < 0 || vcpu >= VM_MAXCPU) | if (vcpu < 0 || vcpu >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (!is_segment_register(reg) && !is_descriptor_table(reg)) | if (!is_segment_register(reg) && !is_descriptor_table(reg)) | ||||
return (EINVAL); | return (EINVAL); | ||||
return (VMGETDESC(vm->cookie, vcpu, reg, desc)); | return (VMGETDESC(vm->cookie, vcpu, reg, desc)); | ||||
} | } | ||||
int | int | ||||
vm_set_seg_desc(struct vm *vm, int vcpu, int reg, | vm_set_seg_desc(struct vm *vm, int vcpu, int reg, | ||||
struct seg_desc *desc) | struct seg_desc *desc) | ||||
{ | { | ||||
if (vcpu < 0 || vcpu >= VM_MAXCPU) | if (vcpu < 0 || vcpu >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (!is_segment_register(reg) && !is_descriptor_table(reg)) | if (!is_segment_register(reg) && !is_descriptor_table(reg)) | ||||
return (EINVAL); | return (EINVAL); | ||||
return (VMSETDESC(vm->cookie, vcpu, reg, desc)); | return (VMSETDESC(vm->cookie, vcpu, reg, desc)); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 155 Lines • ▼ Show 20 Lines | do { \ | ||||
else \ | else \ | ||||
VM_CTR0(vm, fmt); \ | VM_CTR0(vm, fmt); \ | ||||
} while (0) | } while (0) | ||||
static void | static void | ||||
vm_handle_rendezvous(struct vm *vm, int vcpuid) | vm_handle_rendezvous(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU), | KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | ||||
("vm_handle_rendezvous: invalid vcpuid %d", vcpuid)); | ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid)); | ||||
mtx_lock(&vm->rendezvous_mtx); | mtx_lock(&vm->rendezvous_mtx); | ||||
while (vm->rendezvous_func != NULL) { | while (vm->rendezvous_func != NULL) { | ||||
/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ | /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ | ||||
CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus); | CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus); | ||||
if (vcpuid != -1 && | if (vcpuid != -1 && | ||||
▲ Show 20 Lines • Show All 262 Lines • ▼ Show 20 Lines | if (vm->rendezvous_func == NULL) { | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
} | } | ||||
} | } | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
/* | /* | ||||
* Wakeup the other sleeping vcpus and return to userspace. | * Wakeup the other sleeping vcpus and return to userspace. | ||||
*/ | */ | ||||
for (i = 0; i < VM_MAXCPU; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &vm->suspended_cpus)) { | if (CPU_ISSET(i, &vm->suspended_cpus)) { | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
} | } | ||||
*retu = true; | *retu = true; | ||||
return (0); | return (0); | ||||
} | } | ||||
Show All 25 Lines | if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { | ||||
return (EALREADY); | return (EALREADY); | ||||
} | } | ||||
VM_CTR1(vm, "virtual machine successfully suspended %d", how); | VM_CTR1(vm, "virtual machine successfully suspended %d", how); | ||||
/* | /* | ||||
* Notify all active vcpus that they are now suspended. | * Notify all active vcpus that they are now suspended. | ||||
*/ | */ | ||||
for (i = 0; i < VM_MAXCPU; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &vm->active_cpus)) | if (CPU_ISSET(i, &vm->active_cpus)) | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
▲ Show 20 Lines • Show All 69 Lines • ▼ Show 20 Lines | vm_run(struct vm *vm, struct vm_run *vmrun) | ||||
struct pcb *pcb; | struct pcb *pcb; | ||||
uint64_t tscval; | uint64_t tscval; | ||||
struct vm_exit *vme; | struct vm_exit *vme; | ||||
bool retu, intr_disabled; | bool retu, intr_disabled; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
vcpuid = vmrun->cpuid; | vcpuid = vmrun->cpuid; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) | if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
▲ Show 20 Lines • Show All 84 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct vm *vm; | struct vm *vm; | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
enum vcpu_state state; | enum vcpu_state state; | ||||
uint64_t rip; | uint64_t rip; | ||||
int error; | int error; | ||||
vm = arg; | vm = arg; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
state = vcpu_get_state(vm, vcpuid, NULL); | state = vcpu_get_state(vm, vcpuid, NULL); | ||||
if (state == VCPU_RUNNING) { | if (state == VCPU_RUNNING) { | ||||
/* | /* | ||||
* When a vcpu is "running" the next instruction is determined | * When a vcpu is "running" the next instruction is determined | ||||
* by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. | * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. | ||||
Show All 22 Lines | |||||
} | } | ||||
int | int | ||||
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) | vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
int type, vector; | int type, vector; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
if (info & VM_INTINFO_VALID) { | if (info & VM_INTINFO_VALID) { | ||||
type = info & VM_INTINFO_TYPE; | type = info & VM_INTINFO_TYPE; | ||||
vector = info & 0xff; | vector = info & 0xff; | ||||
if (type == VM_INTINFO_NMI && vector != IDT_NMI) | if (type == VM_INTINFO_NMI && vector != IDT_NMI) | ||||
▲ Show 20 Lines • Show All 124 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) | vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
uint64_t info1, info2; | uint64_t info1, info2; | ||||
int valid; | int valid; | ||||
KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid)); | KASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid)); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
info1 = vcpu->exitintinfo; | info1 = vcpu->exitintinfo; | ||||
vcpu->exitintinfo = 0; | vcpu->exitintinfo = 0; | ||||
info2 = 0; | info2 = 0; | ||||
if (vcpu->exception_pending) { | if (vcpu->exception_pending) { | ||||
Show All 23 Lines | vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) | ||||
return (valid); | return (valid); | ||||
} | } | ||||
int | int | ||||
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) | vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
*info1 = vcpu->exitintinfo; | *info1 = vcpu->exitintinfo; | ||||
*info2 = vcpu_exception_intinfo(vcpu); | *info2 = vcpu_exception_intinfo(vcpu); | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid, | vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid, | ||||
uint32_t errcode, int restart_instruction) | uint32_t errcode, int restart_instruction) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
uint64_t regval; | uint64_t regval; | ||||
int error; | int error; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (vector < 0 || vector >= 32) | if (vector < 0 || vector >= 32) | ||||
return (EINVAL); | return (EINVAL); | ||||
/* | /* | ||||
* A double fault exception should never be injected directly into | * A double fault exception should never be injected directly into | ||||
* the guest. It is a derived exception that results from specific | * the guest. It is a derived exception that results from specific | ||||
▲ Show 20 Lines • Show All 74 Lines • ▼ Show 20 Lines | |||||
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); | static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); | ||||
int | int | ||||
vm_inject_nmi(struct vm *vm, int vcpuid) | vm_inject_nmi(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu->nmi_pending = 1; | vcpu->nmi_pending = 1; | ||||
vcpu_notify_event(vm, vcpuid, false); | vcpu_notify_event(vm, vcpuid, false); | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_nmi_pending(struct vm *vm, int vcpuid) | vm_nmi_pending(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); | panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
return (vcpu->nmi_pending); | return (vcpu->nmi_pending); | ||||
} | } | ||||
void | void | ||||
vm_nmi_clear(struct vm *vm, int vcpuid) | vm_nmi_clear(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); | panic("vm_nmi_pending: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
if (vcpu->nmi_pending == 0) | if (vcpu->nmi_pending == 0) | ||||
panic("vm_nmi_clear: inconsistent nmi_pending state"); | panic("vm_nmi_clear: inconsistent nmi_pending state"); | ||||
vcpu->nmi_pending = 0; | vcpu->nmi_pending = 0; | ||||
vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); | vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); | ||||
} | } | ||||
static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); | static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); | ||||
int | int | ||||
vm_inject_extint(struct vm *vm, int vcpuid) | vm_inject_extint(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu->extint_pending = 1; | vcpu->extint_pending = 1; | ||||
vcpu_notify_event(vm, vcpuid, false); | vcpu_notify_event(vm, vcpuid, false); | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_extint_pending(struct vm *vm, int vcpuid) | vm_extint_pending(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_extint_pending: invalid vcpuid %d", vcpuid); | panic("vm_extint_pending: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
return (vcpu->extint_pending); | return (vcpu->extint_pending); | ||||
} | } | ||||
void | void | ||||
vm_extint_clear(struct vm *vm, int vcpuid) | vm_extint_clear(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_extint_pending: invalid vcpuid %d", vcpuid); | panic("vm_extint_pending: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
if (vcpu->extint_pending == 0) | if (vcpu->extint_pending == 0) | ||||
panic("vm_extint_clear: inconsistent extint_pending state"); | panic("vm_extint_clear: inconsistent extint_pending state"); | ||||
vcpu->extint_pending = 0; | vcpu->extint_pending = 0; | ||||
vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); | vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); | ||||
} | } | ||||
int | int | ||||
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) | vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) | ||||
{ | { | ||||
if (vcpu < 0 || vcpu >= VM_MAXCPU) | if (vcpu < 0 || vcpu >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (type < 0 || type >= VM_CAP_MAX) | if (type < 0 || type >= VM_CAP_MAX) | ||||
return (EINVAL); | return (EINVAL); | ||||
return (VMGETCAP(vm->cookie, vcpu, type, retval)); | return (VMGETCAP(vm->cookie, vcpu, type, retval)); | ||||
} | } | ||||
int | int | ||||
vm_set_capability(struct vm *vm, int vcpu, int type, int val) | vm_set_capability(struct vm *vm, int vcpu, int type, int val) | ||||
{ | { | ||||
if (vcpu < 0 || vcpu >= VM_MAXCPU) | if (vcpu < 0 || vcpu >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (type < 0 || type >= VM_CAP_MAX) | if (type < 0 || type >= VM_CAP_MAX) | ||||
return (EINVAL); | return (EINVAL); | ||||
return (VMSETCAP(vm->cookie, vcpu, type, val)); | return (VMSETCAP(vm->cookie, vcpu, type, val)); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, | vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, | ||||
bool from_idle) | bool from_idle) | ||||
{ | { | ||||
int error; | int error; | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_set_run_state: invalid vcpuid %d", vcpuid); | panic("vm_set_run_state: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); | error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
return (error); | return (error); | ||||
} | } | ||||
enum vcpu_state | enum vcpu_state | ||||
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) | vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
enum vcpu_state state; | enum vcpu_state state; | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
panic("vm_get_run_state: invalid vcpuid %d", vcpuid); | panic("vm_get_run_state: invalid vcpuid %d", vcpuid); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
state = vcpu->state; | state = vcpu->state; | ||||
if (hostcpu != NULL) | if (hostcpu != NULL) | ||||
*hostcpu = vcpu->hostcpu; | *hostcpu = vcpu->hostcpu; | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
return (state); | return (state); | ||||
} | } | ||||
int | int | ||||
vm_activate_cpu(struct vm *vm, int vcpuid) | vm_activate_cpu(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (CPU_ISSET(vcpuid, &vm->active_cpus)) | if (CPU_ISSET(vcpuid, &vm->active_cpus)) | ||||
return (EBUSY); | return (EBUSY); | ||||
VCPU_CTR0(vm, vcpuid, "activated"); | VCPU_CTR0(vm, vcpuid, "activated"); | ||||
CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); | CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_suspend_cpu(struct vm *vm, int vcpuid) | vm_suspend_cpu(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
int i; | int i; | ||||
if (vcpuid < -1 || vcpuid >= VM_MAXCPU) | if (vcpuid < -1 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (vcpuid == -1) { | if (vcpuid == -1) { | ||||
vm->debug_cpus = vm->active_cpus; | vm->debug_cpus = vm->active_cpus; | ||||
for (i = 0; i < VM_MAXCPU; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &vm->active_cpus)) | if (CPU_ISSET(i, &vm->active_cpus)) | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
} else { | } else { | ||||
if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); | CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); | ||||
vcpu_notify_event(vm, vcpuid, false); | vcpu_notify_event(vm, vcpuid, false); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_resume_cpu(struct vm *vm, int vcpuid) | vm_resume_cpu(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
if (vcpuid < -1 || vcpuid >= VM_MAXCPU) | if (vcpuid < -1 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (vcpuid == -1) { | if (vcpuid == -1) { | ||||
CPU_ZERO(&vm->debug_cpus); | CPU_ZERO(&vm->debug_cpus); | ||||
} else { | } else { | ||||
if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) | if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
Show All 35 Lines | |||||
{ | { | ||||
return (vm->vcpu[vcpuid].stats); | return (vm->vcpu[vcpuid].stats); | ||||
} | } | ||||
int | int | ||||
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) | vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) | ||||
{ | { | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
*state = vm->vcpu[vcpuid].x2apic_state; | *state = vm->vcpu[vcpuid].x2apic_state; | ||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) | vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) | ||||
{ | { | ||||
if (vcpuid < 0 || vcpuid >= VM_MAXCPU) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (state >= X2APIC_STATE_LAST) | if (state >= X2APIC_STATE_LAST) | ||||
return (EINVAL); | return (EINVAL); | ||||
vm->vcpu[vcpuid].x2apic_state = state; | vm->vcpu[vcpuid].x2apic_state = state; | ||||
vlapic_set_x2apic_state(vm, vcpuid, state); | vlapic_set_x2apic_state(vm, vcpuid, state); | ||||
▲ Show 20 Lines • Show All 70 Lines • ▼ Show 20 Lines | vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest, | ||||
vm_rendezvous_func_t func, void *arg) | vm_rendezvous_func_t func, void *arg) | ||||
{ | { | ||||
int i; | int i; | ||||
/* | /* | ||||
* Enforce that this function is called without any locks | * Enforce that this function is called without any locks | ||||
*/ | */ | ||||
WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); | WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); | ||||
KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU), | KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | ||||
("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); | ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); | ||||
restart: | restart: | ||||
mtx_lock(&vm->rendezvous_mtx); | mtx_lock(&vm->rendezvous_mtx); | ||||
if (vm->rendezvous_func != NULL) { | if (vm->rendezvous_func != NULL) { | ||||
/* | /* | ||||
* If a rendezvous is already in progress then we need to | * If a rendezvous is already in progress then we need to | ||||
* call the rendezvous handler in case this 'vcpuid' is one | * call the rendezvous handler in case this 'vcpuid' is one | ||||
Show All 13 Lines | restart: | ||||
vm->rendezvous_arg = arg; | vm->rendezvous_arg = arg; | ||||
vm_set_rendezvous_func(vm, func); | vm_set_rendezvous_func(vm, func); | ||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
/* | /* | ||||
* Wake up any sleeping vcpus and trigger a VM-exit in any running | * Wake up any sleeping vcpus and trigger a VM-exit in any running | ||||
* vcpus so they handle the rendezvous as soon as possible. | * vcpus so they handle the rendezvous as soon as possible. | ||||
*/ | */ | ||||
for (i = 0; i < VM_MAXCPU; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &dest)) | if (CPU_ISSET(i, &dest)) | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
vm_handle_rendezvous(vm, vcpuid); | vm_handle_rendezvous(vm, vcpuid); | ||||
} | } | ||||
struct vatpic * | struct vatpic * | ||||
▲ Show 20 Lines • Show All 164 Lines • Show Last 20 Lines |
I think you can drop this line.