Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/vmm/vmm.c
Show First 20 Lines • Show All 127 Lines • ▼ Show 20 Lines | |||||
/* Spin-lock helpers for the per-vCPU mutex ('mtx' member of struct vcpu). */
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

/*
 * A memory segment: a VM object that backs a range of guest memory.
 * 'sysmem' presumably distinguishes ordinary guest system memory from
 * device-style segments (see sysmem_mapping()) — confirm against callers.
 */
struct mem_seg {
	size_t	len;			/* size of the backing object, bytes */
	bool	sysmem;			/* true for guest system memory */
	struct vm_object *object;	/* backing VM object; NULL if unused */
};
/* Maximum memory segments per VM.  NOTE(review): bumped from 3 to 4 here. */
#define	VM_MAX_MEMSEGS	4
struct mem_map { | struct mem_map { | ||||
vm_paddr_t gpa; | vm_paddr_t gpa; | ||||
size_t len; | size_t len; | ||||
vm_ooffset_t segoff; | vm_ooffset_t segoff; | ||||
int segid; | int segid; | ||||
int prot; | int prot; | ||||
int flags; | int flags; | ||||
▲ Show 20 Lines • Show All 106 Lines • ▼ Show 20 Lines | |||||
/*
 * Debug tunable: when non-zero, trap into the hypervisor on every guest
 * exception and reflect it back into the guest (read-only after boot).
 */
static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

/* Forward declarations for file-local helpers defined later in this file. */
static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
static int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa,
    int segid, vm_ooffset_t segoff);
#ifdef KTR | #ifdef KTR | ||||
static const char * | static const char * | ||||
vcpu_state2str(enum vcpu_state state) | vcpu_state2str(enum vcpu_state state) | ||||
{ | { | ||||
switch (state) { | switch (state) { | ||||
case VCPU_IDLE: | case VCPU_IDLE: | ||||
▲ Show 20 Lines • Show All 457 Lines • ▼ Show 20 Lines | |||||
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, | vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, | ||||
size_t len, int prot, int flags) | size_t len, int prot, int flags) | ||||
{ | { | ||||
struct mem_seg *seg; | struct mem_seg *seg; | ||||
struct mem_map *m, *map; | struct mem_map *m, *map; | ||||
vm_ooffset_t last; | vm_ooffset_t last; | ||||
int i, error; | int i, error; | ||||
if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) | if ((len != 0 && prot == 0) || (prot & ~(VM_PROT_ALL)) != 0) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (flags & ~VM_MEMMAP_F_WIRED) | if (flags & ~VM_MEMMAP_F_WIRED) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (segid < 0 || segid >= VM_MAX_MEMSEGS) | if (segid < 0 || segid >= VM_MAX_MEMSEGS) | ||||
return (EINVAL); | return (EINVAL); | ||||
seg = &vm->mem_segs[segid]; | seg = &vm->mem_segs[segid]; | ||||
if (seg->object == NULL) | if (seg->object == NULL) | ||||
return (EINVAL); | return (EINVAL); | ||||
last = first + len; | last = first + len; | ||||
if (first < 0 || first >= last || last > seg->len) | if (first < 0 || first > last || last > seg->len) | ||||
return (EINVAL); | return (EINVAL); | ||||
if ((gpa | first | last) & PAGE_MASK) | if ((gpa | first | last) & PAGE_MASK) | ||||
return (EINVAL); | return (EINVAL); | ||||
/* The same thing at the same place but with zero length means unmap */ | |||||
if (len == 0) | |||||
return vm_munmap_memseg(vm, gpa, segid, first); | |||||
map = NULL; | map = NULL; | ||||
for (i = 0; i < VM_MAX_MEMMAPS; i++) { | for (i = 0; i < VM_MAX_MEMMAPS; i++) { | ||||
m = &vm->mem_maps[i]; | m = &vm->mem_maps[i]; | ||||
if (m->len == 0) { | if (m->len == 0) { | ||||
map = m; | map = m; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
Show All 19 Lines | vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, | ||||
} | } | ||||
map->gpa = gpa; | map->gpa = gpa; | ||||
map->len = len; | map->len = len; | ||||
map->segoff = first; | map->segoff = first; | ||||
map->segid = segid; | map->segid = segid; | ||||
map->prot = prot; | map->prot = prot; | ||||
map->flags = flags; | map->flags = flags; | ||||
return (0); | |||||
} | |||||
static int | |||||
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t segoff) | |||||
{ | |||||
struct mem_map *m; | |||||
int i; | |||||
for (i = 0; i < VM_MAX_MEMMAPS; i++) { | |||||
m = &vm->mem_maps[i]; | |||||
if (m->gpa == gpa && m->segid == segid && m->segoff == segoff) | |||||
break; | |||||
} | |||||
if (i >= VM_MAX_MEMMAPS) | |||||
return (ENOENT); | |||||
vm_free_memmap(vm, i); | |||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, | vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, | ||||
vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) | vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) | ||||
{ | { | ||||
struct mem_map *mm, *mmnext; | struct mem_map *mm, *mmnext; | ||||
▲ Show 20 Lines • Show All 2,129 Lines • Show Last 20 Lines |