Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/vmm/vmm.c
Show First 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | |||||
#include "vmm_util.h" | #include "vmm_util.h" | ||||
#include "vatpic.h" | #include "vatpic.h" | ||||
#include "vatpit.h" | #include "vatpit.h" | ||||
#include "vhpet.h" | #include "vhpet.h" | ||||
#include "vioapic.h" | #include "vioapic.h" | ||||
#include "vlapic.h" | #include "vlapic.h" | ||||
#include "vpmtmr.h" | #include "vpmtmr.h" | ||||
#include "vrtc.h" | #include "vrtc.h" | ||||
#include "vmm_host_stat.h" | |||||
#include "vmm_stat.h" | #include "vmm_stat.h" | ||||
#include "vmm_lapic.h" | #include "vmm_lapic.h" | ||||
#include "io/ppt.h" | #include "io/ppt.h" | ||||
#include "io/iommu.h" | #include "io/iommu.h" | ||||
struct vlapic; | struct vlapic; | ||||
▲ Show 20 Lines • Show All 272 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
vmm_handler(module_t mod, int what, void *arg) | vmm_handler(module_t mod, int what, void *arg) | ||||
{ | { | ||||
int error; | int error; | ||||
switch (what) { | switch (what) { | ||||
case MOD_LOAD: | case MOD_LOAD: | ||||
vmmdev_init(); | vmmdev_init(); | ||||
vmm_host_stat_init(); | |||||
error = vmm_init(); | error = vmm_init(); | ||||
if (error == 0) | if (error == 0) | ||||
vmm_initialized = 1; | vmm_initialized = 1; | ||||
break; | break; | ||||
case MOD_UNLOAD: | case MOD_UNLOAD: | ||||
error = vmmdev_cleanup(); | error = vmmdev_cleanup(); | ||||
if (error == 0) { | if (error == 0) { | ||||
vmm_resume_p = NULL; | vmm_resume_p = NULL; | ||||
iommu_cleanup(); | iommu_cleanup(); | ||||
if (vmm_ipinum != IPI_AST) | if (vmm_ipinum != IPI_AST) | ||||
lapic_ipi_free(vmm_ipinum); | lapic_ipi_free(vmm_ipinum); | ||||
error = VMM_CLEANUP(); | error = VMM_CLEANUP(); | ||||
/* | /* | ||||
* Something bad happened - prevent new | * Something bad happened - prevent new | ||||
* VMs from being created | * VMs from being created | ||||
*/ | */ | ||||
if (error) | if (error) | ||||
vmm_initialized = 0; | vmm_initialized = 0; | ||||
vmm_host_stat_cleanup(); | |||||
} | } | ||||
break; | break; | ||||
default: | default: | ||||
error = 0; | error = 0; | ||||
break; | break; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 1,248 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
vm_run(struct vm *vm, struct vm_run *vmrun) | vm_run(struct vm *vm, struct vm_run *vmrun) | ||||
{ | { | ||||
struct vm_eventinfo evinfo; | struct vm_eventinfo evinfo; | ||||
int error, vcpuid; | int error, vcpuid; | ||||
struct vcpu *vcpu; | struct vcpu *vcpu; | ||||
struct pcb *pcb; | struct pcb *pcb; | ||||
uint64_t tscval; | uint64_t tscval_enter, tscval_delta; | ||||
struct vm_exit *vme; | struct vm_exit *vme; | ||||
bool retu, intr_disabled; | bool retu, intr_disabled; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
vcpuid = vmrun->cpuid; | vcpuid = vmrun->cpuid; | ||||
if (vcpuid < 0 || vcpuid >= vm->maxcpus) | if (vcpuid < 0 || vcpuid >= vm->maxcpus) | ||||
return (EINVAL); | return (EINVAL); | ||||
Show All 11 Lines | vm_run(struct vm *vm, struct vm_run *vmrun) | ||||
evinfo.sptr = &vm->suspend; | evinfo.sptr = &vm->suspend; | ||||
evinfo.iptr = &vcpu->reqidle; | evinfo.iptr = &vcpu->reqidle; | ||||
restart: | restart: | ||||
critical_enter(); | critical_enter(); | ||||
KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), | KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), | ||||
("vm_run: absurd pm_active")); | ("vm_run: absurd pm_active")); | ||||
tscval = rdtsc(); | tscval_enter = rdtsc(); | ||||
pcb = PCPU_GET(curpcb); | pcb = PCPU_GET(curpcb); | ||||
set_pcb_flags(pcb, PCB_FULL_IRET); | set_pcb_flags(pcb, PCB_FULL_IRET); | ||||
restore_guest_fpustate(vcpu); | restore_guest_fpustate(vcpu); | ||||
vcpu_require_state(vm, vcpuid, VCPU_RUNNING); | vcpu_require_state(vm, vcpuid, VCPU_RUNNING); | ||||
error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo); | error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo); | ||||
vcpu_require_state(vm, vcpuid, VCPU_FROZEN); | vcpu_require_state(vm, vcpuid, VCPU_FROZEN); | ||||
save_guest_fpustate(vcpu); | save_guest_fpustate(vcpu); | ||||
vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); | tscval_delta = rdtsc() - tscval_enter; | ||||
vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, tscval_delta); | |||||
critical_exit(); | critical_exit(); | ||||
vmm_host_stat_cpu_ticks_incr(curcpu, tscval_delta); | |||||
jhb: If you do this inside of critical_exit(), you can probably avoid the need for atomics entirely as you won't be migrated to a different CPU during the increment. | |||||
crowston_protonmail.com (Author) [Unsubmitted — Done, Inline Actions]: I believe, formally speaking, without the atomics, we will still have a data race between the load and store operations unless I add a fence on the load side. On x86, relaxed atomics are essentially free, so it seems like using atomics is a lower cost than extending the life of the critical section. crowston_protonmail.com: I believe, formally speaking, without the atomics, we will still have a data race between the… | |||||
jhb [Unsubmitted — Not Done, Inline Actions]: There's always a race on reading and it doesn't really matter if you lose (these are just stats after all). I would echo Peter's comment that if it's going to bother trying to use atomics instead of some other existing synchronization mechanism, it would be best to use counter64. I just think doing the ++ (which doesn't even have to be atomic) inside critical_enter() is even cheaper than that. jhb: There's always a race on reading and it doesn't really matter if you lose (these are just stats… | |||||
if (error == 0) { | if (error == 0) { | ||||
retu = false; | retu = false; | ||||
vcpu->nextrip = vme->rip + vme->inst_length; | vcpu->nextrip = vme->rip + vme->inst_length; | ||||
switch (vme->exitcode) { | switch (vme->exitcode) { | ||||
case VM_EXITCODE_REQIDLE: | case VM_EXITCODE_REQIDLE: | ||||
error = vm_handle_reqidle(vm, vcpuid, &retu); | error = vm_handle_reqidle(vm, vcpuid, &retu); | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 1,000 Lines • Show Last 20 Lines |
If you do this inside of critical_exit(), you can probably avoid the need for atomics entirely as you won't be migrated to a different CPU during the increment.