Changeset View
Changeset View
Standalone View
Standalone View
head/sys/amd64/vmm/vmm.c
Show First 20 Lines • Show All 1,237 Lines • ▼ Show 20 Lines | |||||
/*
 * Trace a rendezvous event.  Uses the per-vcpu trace buffer when a
 * non-negative vcpuid is supplied, otherwise falls back to the VM-wide
 * trace buffer (vcpuid == -1 means "no vcpu context").
 */
#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)
static void | static int | ||||
vm_handle_rendezvous(struct vm *vm, int vcpuid) | vm_handle_rendezvous(struct vm *vm, int vcpuid) | ||||
{ | { | ||||
struct thread *td; | |||||
int error; | |||||
KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | ||||
("vm_handle_rendezvous: invalid vcpuid %d", vcpuid)); | ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid)); | ||||
error = 0; | |||||
td = curthread; | |||||
mtx_lock(&vm->rendezvous_mtx); | mtx_lock(&vm->rendezvous_mtx); | ||||
while (vm->rendezvous_func != NULL) { | while (vm->rendezvous_func != NULL) { | ||||
/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ | /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ | ||||
CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus); | CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus); | ||||
if (vcpuid != -1 && | if (vcpuid != -1 && | ||||
CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && | CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && | ||||
!CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { | !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { | ||||
VCPU_CTR0(vm, vcpuid, "Calling rendezvous func"); | VCPU_CTR0(vm, vcpuid, "Calling rendezvous func"); | ||||
(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg); | (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg); | ||||
CPU_SET(vcpuid, &vm->rendezvous_done_cpus); | CPU_SET(vcpuid, &vm->rendezvous_done_cpus); | ||||
} | } | ||||
if (CPU_CMP(&vm->rendezvous_req_cpus, | if (CPU_CMP(&vm->rendezvous_req_cpus, | ||||
&vm->rendezvous_done_cpus) == 0) { | &vm->rendezvous_done_cpus) == 0) { | ||||
VCPU_CTR0(vm, vcpuid, "Rendezvous completed"); | VCPU_CTR0(vm, vcpuid, "Rendezvous completed"); | ||||
vm->rendezvous_func = NULL; | vm->rendezvous_func = NULL; | ||||
wakeup(&vm->rendezvous_func); | wakeup(&vm->rendezvous_func); | ||||
break; | break; | ||||
} | } | ||||
RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion"); | RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion"); | ||||
mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, | mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, | ||||
"vmrndv", 0); | "vmrndv", hz); | ||||
if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) { | |||||
mtx_unlock(&vm->rendezvous_mtx); | |||||
error = thread_check_susp(td, true); | |||||
if (error != 0) | |||||
return (error); | |||||
mtx_lock(&vm->rendezvous_mtx); | |||||
} | } | ||||
} | |||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
return (0); | |||||
} | } | ||||
/* | /* | ||||
* Emulate a guest 'hlt' by sleeping until the vcpu is ready to run. | * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run. | ||||
*/ | */ | ||||
static int | static int | ||||
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu) | vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu) | ||||
{ | { | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
const char *wmesg; | const char *wmesg; | ||||
int t, vcpu_halted, vm_halted; | struct thread *td; | ||||
int error, t, vcpu_halted, vm_halted; | |||||
KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); | KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu_halted = 0; | vcpu_halted = 0; | ||||
vm_halted = 0; | vm_halted = 0; | ||||
error = 0; | |||||
td = curthread; | |||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
while (1) { | while (1) { | ||||
/* | /* | ||||
* Do a final check for pending NMI or interrupts before | * Do a final check for pending NMI or interrupts before | ||||
* really putting this thread to sleep. Also check for | * really putting this thread to sleep. Also check for | ||||
* software events that would cause this vcpu to wakeup. | * software events that would cause this vcpu to wakeup. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | while (1) { | ||||
vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); | vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); | ||||
/* | /* | ||||
* XXX msleep_spin() cannot be interrupted by signals so | * XXX msleep_spin() cannot be interrupted by signals so | ||||
* wake up periodically to check pending signals. | * wake up periodically to check pending signals. | ||||
*/ | */ | ||||
msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); | msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); | ||||
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); | vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); | ||||
vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t); | vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t); | ||||
if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) { | |||||
vcpu_unlock(vcpu); | |||||
error = thread_check_susp(td, false); | |||||
if (error != 0) | |||||
return (error); | |||||
vcpu_lock(vcpu); | |||||
} | } | ||||
} | |||||
if (vcpu_halted) | if (vcpu_halted) | ||||
CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); | CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
if (vm_halted) | if (vm_halted) | ||||
vm_suspend(vm, VM_SUSPEND_HALT); | vm_suspend(vm, VM_SUSPEND_HALT); | ||||
▲ Show 20 Lines • Show All 119 Lines • ▼ Show 20 Lines | error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging, | ||||
mread, mwrite, retu); | mread, mwrite, retu); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu) | vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu) | ||||
{ | { | ||||
int i, done; | int error, i; | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
struct thread *td; | |||||
done = 0; | error = 0; | ||||
vcpu = &vm->vcpu[vcpuid]; | vcpu = &vm->vcpu[vcpuid]; | ||||
td = curthread; | |||||
CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); | CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); | ||||
/* | /* | ||||
* Wait until all 'active_cpus' have suspended themselves. | * Wait until all 'active_cpus' have suspended themselves. | ||||
* | * | ||||
* Since a VM may be suspended at any time including when one or | * Since a VM may be suspended at any time including when one or | ||||
* more vcpus are doing a rendezvous we need to call the rendezvous | * more vcpus are doing a rendezvous we need to call the rendezvous | ||||
* handler while we are waiting to prevent a deadlock. | * handler while we are waiting to prevent a deadlock. | ||||
*/ | */ | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
while (1) { | while (error == 0) { | ||||
if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { | if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { | ||||
VCPU_CTR0(vm, vcpuid, "All vcpus suspended"); | VCPU_CTR0(vm, vcpuid, "All vcpus suspended"); | ||||
break; | break; | ||||
} | } | ||||
if (vm->rendezvous_func == NULL) { | if (vm->rendezvous_func == NULL) { | ||||
VCPU_CTR0(vm, vcpuid, "Sleeping during suspend"); | VCPU_CTR0(vm, vcpuid, "Sleeping during suspend"); | ||||
vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); | vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); | ||||
msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); | msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); | ||||
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); | vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); | ||||
if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) { | |||||
vcpu_unlock(vcpu); | |||||
error = thread_check_susp(td, false); | |||||
vcpu_lock(vcpu); | |||||
} | |||||
} else { | } else { | ||||
VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend"); | VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend"); | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
vm_handle_rendezvous(vm, vcpuid); | error = vm_handle_rendezvous(vm, vcpuid); | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
} | } | ||||
} | } | ||||
vcpu_unlock(vcpu); | vcpu_unlock(vcpu); | ||||
/* | /* | ||||
* Wakeup the other sleeping vcpus and return to userspace. | * Wakeup the other sleeping vcpus and return to userspace. | ||||
*/ | */ | ||||
for (i = 0; i < vm->maxcpus; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &vm->suspended_cpus)) { | if (CPU_ISSET(i, &vm->suspended_cpus)) { | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
} | } | ||||
*retu = true; | *retu = true; | ||||
return (0); | return (error); | ||||
} | } | ||||
static int | static int | ||||
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu) | vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu) | ||||
{ | { | ||||
struct vcpu *vcpu = &vm->vcpu[vcpuid]; | struct vcpu *vcpu = &vm->vcpu[vcpuid]; | ||||
vcpu_lock(vcpu); | vcpu_lock(vcpu); | ||||
▲ Show 20 Lines • Show All 157 Lines • ▼ Show 20 Lines | if (error == 0) { | ||||
case VM_EXITCODE_SUSPENDED: | case VM_EXITCODE_SUSPENDED: | ||||
error = vm_handle_suspend(vm, vcpuid, &retu); | error = vm_handle_suspend(vm, vcpuid, &retu); | ||||
break; | break; | ||||
case VM_EXITCODE_IOAPIC_EOI: | case VM_EXITCODE_IOAPIC_EOI: | ||||
vioapic_process_eoi(vm, vcpuid, | vioapic_process_eoi(vm, vcpuid, | ||||
vme->u.ioapic_eoi.vector); | vme->u.ioapic_eoi.vector); | ||||
break; | break; | ||||
case VM_EXITCODE_RENDEZVOUS: | case VM_EXITCODE_RENDEZVOUS: | ||||
vm_handle_rendezvous(vm, vcpuid); | error = vm_handle_rendezvous(vm, vcpuid); | ||||
error = 0; | |||||
break; | break; | ||||
case VM_EXITCODE_HLT: | case VM_EXITCODE_HLT: | ||||
intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); | intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); | ||||
error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu); | error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu); | ||||
break; | break; | ||||
case VM_EXITCODE_PAGING: | case VM_EXITCODE_PAGING: | ||||
error = vm_handle_paging(vm, vcpuid, &retu); | error = vm_handle_paging(vm, vcpuid, &retu); | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 761 Lines • ▼ Show 20 Lines | |||||
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}
void | int | ||||
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest, | vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest, | ||||
vm_rendezvous_func_t func, void *arg) | vm_rendezvous_func_t func, void *arg) | ||||
{ | { | ||||
int i; | int error, i; | ||||
/* | /* | ||||
* Enforce that this function is called without any locks | * Enforce that this function is called without any locks | ||||
*/ | */ | ||||
WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); | WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); | ||||
KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus), | ||||
("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); | ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid)); | ||||
restart: | restart: | ||||
mtx_lock(&vm->rendezvous_mtx); | mtx_lock(&vm->rendezvous_mtx); | ||||
if (vm->rendezvous_func != NULL) { | if (vm->rendezvous_func != NULL) { | ||||
/* | /* | ||||
* If a rendezvous is already in progress then we need to | * If a rendezvous is already in progress then we need to | ||||
* call the rendezvous handler in case this 'vcpuid' is one | * call the rendezvous handler in case this 'vcpuid' is one | ||||
* of the targets of the rendezvous. | * of the targets of the rendezvous. | ||||
*/ | */ | ||||
RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress"); | RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress"); | ||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
vm_handle_rendezvous(vm, vcpuid); | error = vm_handle_rendezvous(vm, vcpuid); | ||||
if (error != 0) | |||||
return (error); | |||||
goto restart; | goto restart; | ||||
} | } | ||||
KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " | KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " | ||||
"rendezvous is still in progress")); | "rendezvous is still in progress")); | ||||
RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous"); | RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous"); | ||||
vm->rendezvous_req_cpus = dest; | vm->rendezvous_req_cpus = dest; | ||||
CPU_ZERO(&vm->rendezvous_done_cpus); | CPU_ZERO(&vm->rendezvous_done_cpus); | ||||
vm->rendezvous_arg = arg; | vm->rendezvous_arg = arg; | ||||
vm->rendezvous_func = func; | vm->rendezvous_func = func; | ||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
/* | /* | ||||
* Wake up any sleeping vcpus and trigger a VM-exit in any running | * Wake up any sleeping vcpus and trigger a VM-exit in any running | ||||
* vcpus so they handle the rendezvous as soon as possible. | * vcpus so they handle the rendezvous as soon as possible. | ||||
*/ | */ | ||||
for (i = 0; i < vm->maxcpus; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
if (CPU_ISSET(i, &dest)) | if (CPU_ISSET(i, &dest)) | ||||
vcpu_notify_event(vm, i, false); | vcpu_notify_event(vm, i, false); | ||||
} | } | ||||
vm_handle_rendezvous(vm, vcpuid); | return (vm_handle_rendezvous(vm, vcpuid)); | ||||
} | } | ||||
struct vatpic * | struct vatpic * | ||||
vm_atpic(struct vm *vm) | vm_atpic(struct vm *vm) | ||||
{ | { | ||||
return (vm->vatpic); | return (vm->vatpic); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 159 Lines • Show Last 20 Lines |