Changeset View
Standalone View
lib/libvmmapi/vmmapi.h
Show All 36 Lines | |||||
/* | /* | ||||
* API version for out-of-tree consumers like grub-bhyve for making compile | * API version for out-of-tree consumers like grub-bhyve for making compile | ||||
* time decisions. | * time decisions. | ||||
*/ | */ | ||||
#define VMMAPI_VERSION 0103 /* 2 digit major followed by 2 digit minor */ | #define VMMAPI_VERSION 0103 /* 2 digit major followed by 2 digit minor */ | ||||
struct iovec; | struct iovec; | ||||
struct vmctx; | struct vmctx; | ||||
enum x2apic_state; | |||||
/*
 * Different styles of mapping the memory assigned to a VM into the address
 * space of the controlling process.  The style is chosen once per VM via
 * the last argument of vm_setup_memory().
 */
enum vm_mmap_style {
	VM_MMAP_NONE,		/* no mapping */
	VM_MMAP_ALL,		/* fully and statically mapped */
	VM_MMAP_SPARSE,		/* mappings created on-demand */
};
alexandru.elisei_gmail.com: struct vmctx memory representation is very x86-centric. There is no lowmem on arm64, no highmem… | |||||
/* | /* | ||||
* 'flags' value passed to 'vm_set_memflags()'. | * 'flags' value passed to 'vm_set_memflags()'. | ||||
*/ | */ | ||||
#define VM_MEM_F_INCORE 0x01 /* include guest memory in core file */ | #define VM_MEM_F_INCORE 0x01 /* include guest memory in core file */ | ||||
#define VM_MEM_F_WIRED 0x02 /* guest memory is wired */ | #define VM_MEM_F_WIRED 0x02 /* guest memory is wired */ | ||||
/*
 * Identifiers for memory segments:
 * - vm_setup_memory() uses VM_SYSMEM for the system memory segment.
 * - the remaining identifiers can be used to create devmem segments.
 *
 * NOTE(review): the implicit enumerator values are part of the ABI shared
 * with vm_create_devmem()/vm_mmap_memseg() callers; do not reorder.
 */
enum {
	VM_SYSMEM,		/* system (guest RAM) memory segment */
	VM_BOOTROM,		/* devmem segment, e.g. a boot ROM */
	VM_FRAMEBUFFER,		/* devmem segment, e.g. a framebuffer */
};
/* | |||||
* Get the length and name of the memory segment identified by 'segid'. | |||||
* Note that system memory segments are identified with a nul name. | |||||
* | |||||
* Returns 0 on success and non-zero otherwise. | |||||
*/ | |||||
int vm_get_memseg(struct vmctx *ctx, int ident, size_t *lenp, char *name, | |||||
size_t namesiz); | |||||
/* | |||||
* Iterate over the guest address space. This function finds an address range | |||||
* that starts at an address >= *gpa. | |||||
* | |||||
* Returns 0 if the next address range was found and non-zero otherwise. | |||||
*/ | |||||
int vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, | |||||
vm_ooffset_t *segoff, size_t *len, int *prot, int *flags); | |||||
/* | |||||
* Create a device memory segment identified by 'segid'. | |||||
* | |||||
* Returns a pointer to the memory segment on success and MAP_FAILED otherwise. | |||||
*/ | |||||
void *vm_create_devmem(struct vmctx *ctx, int segid, const char *name, | |||||
size_t len); | |||||
/* | |||||
* Map the memory segment identified by 'segid' into the guest address space | |||||
* at [gpa,gpa+len) with protection 'prot'. | |||||
*/ | |||||
int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, | |||||
vm_ooffset_t segoff, size_t len, int prot); | |||||
int vm_create(const char *name); | int vm_create(const char *name); | ||||
int vm_get_device_fd(struct vmctx *ctx); | int vm_get_device_fd(struct vmctx *ctx); | ||||
struct vmctx *vm_open(const char *name); | struct vmctx *vm_open(const char *name); | ||||
Not Done Inline ActionsI am unclear why this block was being moved around in the file? rgrimes: I am unclear why this block was being moved around in the file? | |||||
void vm_destroy(struct vmctx *ctx); | void vm_destroy(struct vmctx *ctx); | ||||
int vm_parse_memsize(const char *optarg, size_t *memsize); | int vm_parse_memsize(const char *optarg, size_t *memsize); | ||||
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s); | int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s); | ||||
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len); | void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len); | ||||
int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num); | int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num); | ||||
int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging, | int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging, | ||||
uint64_t gla, int prot, uint64_t *gpa, int *fault); | uint64_t gla, int prot, uint64_t *gpa, int *fault); | ||||
int vm_gla2gpa_nofault(struct vmctx *, int vcpuid, | int vm_gla2gpa_nofault(struct vmctx *, int vcpuid, | ||||
struct vm_guest_paging *paging, uint64_t gla, int prot, | struct vm_guest_paging *paging, uint64_t gla, int prot, | ||||
uint64_t *gpa, int *fault); | uint64_t *gpa, int *fault); | ||||
uint32_t vm_get_lowmem_limit(struct vmctx *ctx); | |||||
void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit); | |||||
void vm_set_memflags(struct vmctx *ctx, int flags); | void vm_set_memflags(struct vmctx *ctx, int flags); | ||||
int vm_get_memflags(struct vmctx *ctx); | int vm_get_memflags(struct vmctx *ctx); | ||||
size_t vm_get_lowmem_size(struct vmctx *ctx); | |||||
size_t vm_get_highmem_size(struct vmctx *ctx); | |||||
int vm_set_desc(struct vmctx *ctx, int vcpu, int reg, | |||||
uint64_t base, uint32_t limit, uint32_t access); | |||||
int vm_get_desc(struct vmctx *ctx, int vcpu, int reg, | |||||
uint64_t *base, uint32_t *limit, uint32_t *access); | |||||
int vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, | |||||
struct seg_desc *seg_desc); | |||||
int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val); | int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val); | ||||
int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval); | int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval); | ||||
int vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count, | int vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count, | ||||
const int *regnums, uint64_t *regvals); | const int *regnums, uint64_t *regvals); | ||||
int vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count, | int vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count, | ||||
const int *regnums, uint64_t *regvals); | const int *regnums, uint64_t *regvals); | ||||
int vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *ret_vmexit); | int vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *ret_vmexit); | ||||
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how); | int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how); | ||||
int vm_reinit(struct vmctx *ctx); | int vm_reinit(struct vmctx *ctx); | ||||
int vm_apicid2vcpu(struct vmctx *ctx, int apicid); | |||||
int vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, | |||||
int errcode_valid, uint32_t errcode, int restart_instruction); | |||||
int vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector); | |||||
int vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector); | |||||
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg); | |||||
int vm_ioapic_assert_irq(struct vmctx *ctx, int irq); | |||||
int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq); | |||||
int vm_ioapic_pulse_irq(struct vmctx *ctx, int irq); | |||||
int vm_ioapic_pincount(struct vmctx *ctx, int *pincount); | |||||
int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq); | |||||
int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq); | |||||
int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq); | |||||
int vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq, | |||||
enum vm_intr_trigger trigger); | |||||
int vm_inject_nmi(struct vmctx *ctx, int vcpu); | |||||
int vm_capability_name2type(const char *capname); | int vm_capability_name2type(const char *capname); | ||||
const char *vm_capability_type2name(int type); | const char *vm_capability_type2name(int type); | ||||
int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, | int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, | ||||
int *retval); | int *retval); | ||||
int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, | int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, | ||||
int val); | int val); | ||||
int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func); | int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func); | ||||
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); | int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); | ||||
int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, | int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, | ||||
vm_paddr_t gpa, size_t len, vm_paddr_t hpa); | vm_paddr_t gpa, size_t len, vm_paddr_t hpa); | ||||
int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, | int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, | ||||
int func, uint64_t addr, uint64_t msg, int numvec); | int func, uint64_t addr, uint64_t msg, int numvec); | ||||
int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, | int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, | ||||
int func, int idx, uint64_t addr, uint64_t msg, | int func, int idx, uint64_t addr, uint64_t msg, | ||||
uint32_t vector_control); | uint32_t vector_control); | ||||
int vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *i1, uint64_t *i2); | |||||
int vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t exit_intinfo); | |||||
const cap_ioctl_t *vm_get_ioctls(size_t *len); | const cap_ioctl_t *vm_get_ioctls(size_t *len); | ||||
/* | /* | ||||
* Return a pointer to the statistics buffer. Note that this is not MT-safe. | * Return a pointer to the statistics buffer. Note that this is not MT-safe. | ||||
*/ | */ | ||||
uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv, | uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv, | ||||
int *ret_entries); | int *ret_entries); | ||||
const char *vm_get_stat_desc(struct vmctx *ctx, int index); | const char *vm_get_stat_desc(struct vmctx *ctx, int index); | ||||
int vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *s); | |||||
int vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s); | |||||
int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities); | |||||
/* | /* | ||||
* Translate the GLA range [gla,gla+len) into GPA segments in 'iov'. | * Translate the GLA range [gla,gla+len) into GPA segments in 'iov'. | ||||
* The 'iovcnt' should be big enough to accommodate all GPA segments. | * The 'iovcnt' should be big enough to accommodate all GPA segments. | ||||
* | * | ||||
* retval fault Interpretation | * retval fault Interpretation | ||||
* 0 0 Success | * 0 0 Success | ||||
* 0 1 An exception was injected into the guest | * 0 1 An exception was injected into the guest | ||||
* EFAULT N/A Error | * EFAULT N/A Error | ||||
*/ | */ | ||||
int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg, | int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg, | ||||
uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt, | uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt, | ||||
int *fault); | int *fault); | ||||
void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov, | void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov, | ||||
void *host_dst, size_t len); | void *host_dst, size_t len); | ||||
void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src, | void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src, | ||||
struct iovec *guest_iov, size_t len); | struct iovec *guest_iov, size_t len); | ||||
void vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, | void vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, | ||||
int iovcnt); | int iovcnt); | ||||
/* RTC */ | |||||
int vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value); | |||||
int vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval); | |||||
int vm_rtc_settime(struct vmctx *ctx, time_t secs); | |||||
int vm_rtc_gettime(struct vmctx *ctx, time_t *secs); | |||||
/* Reset vcpu register state */ | |||||
int vcpu_reset(struct vmctx *ctx, int vcpu); | |||||
int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus); | int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus); | ||||
int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus); | int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus); | ||||
int vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus); | int vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus); | ||||
int vm_activate_cpu(struct vmctx *ctx, int vcpu); | int vm_activate_cpu(struct vmctx *ctx, int vcpu); | ||||
int vm_suspend_cpu(struct vmctx *ctx, int vcpu); | int vm_suspend_cpu(struct vmctx *ctx, int vcpu); | ||||
int vm_resume_cpu(struct vmctx *ctx, int vcpu); | int vm_resume_cpu(struct vmctx *ctx, int vcpu); | ||||
/* CPU topology */ | /* CPU topology */ | ||||
int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores, | int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores, | ||||
uint16_t threads, uint16_t maxcpus); | uint16_t threads, uint16_t maxcpus); | ||||
int vm_get_topology(struct vmctx *ctx, uint16_t *sockets, uint16_t *cores, | int vm_get_topology(struct vmctx *ctx, uint16_t *sockets, uint16_t *cores, | ||||
uint16_t *threads, uint16_t *maxcpus); | uint16_t *threads, uint16_t *maxcpus); | ||||
/* | const char *vm_get_name(struct vmctx *ctx); | ||||
alexandru.elisei_gmail.comAuthorUnsubmitted Done Inline Actionsstruct vmctx is now machine dependent, and I needed an accessor for the VM name to be used by vm_destroy(). alexandru.elisei_gmail.com: struct vmctx is now machine dependent, and I needed an accessor for the VM name to be used by… | |||||
jhbUnsubmitted Not Done Inline ActionsWe could still expose 'struct vmctx' in the library sources itself via an internal header and then you wouldn't need the accessor functions for 'fd' and 'name' for use in the library. If consumers of the library need the accessors we can still add them, but if they aren't currently needed we can wait to add them until they are needed. jhb: We could still expose 'struct vmctx' in the library sources itself via an internal header and… | |||||
alexandru.elisei_gmail.comAuthorUnsubmitted Done Inline Actionsget_device_fd() was already present in vmmapi.h. My changes were to add vm_get_name() and use get_device_fd() instead of accessing the fd directly when doing an ioctl. struct vmctx needs to be MD because the memory representation is amd64-centric. I will add an internal header file in the lib/vmmapi/amd64/ directory and I'll include it in vmmapi.c. alexandru.elisei_gmail.com: get_device_fd() was already present in vmmapi.h. My changes were to add vm_get_name() and use… | |||||
* FreeBSD specific APIs | |||||
*/ | #if defined(__amd64__) | ||||
jhbUnsubmitted Not Done Inline ActionsI would put the nested include at the top of the file just after the include guards as that is the normal style for nested includes in FreeBSD. jhb: I would put the nested include at the top of the file just after the include guards as that is… | |||||
alexandru.elisei_gmail.comAuthorUnsubmitted Done Inline ActionsSure, I'll do that. alexandru.elisei_gmail.com: Sure, I'll do that. | |||||
int vm_setup_freebsd_registers(struct vmctx *ctx, int vcpu, | #include <amd64/vmmapi_amd64.h> | ||||
uint64_t rip, uint64_t cr3, uint64_t gdtbase, | #endif | ||||
uint64_t rsp); | |||||
int vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu, | |||||
uint32_t eip, uint32_t gdtbase, | |||||
uint32_t esp); | |||||
void vm_setup_freebsd_gdt(uint64_t *gdtr); | |||||
#endif /* _VMMAPI_H_ */ | #endif /* _VMMAPI_H_ */ |
struct vmctx memory representation is very x86-centric. There is no lowmem on arm64, no highmem on arm. Because of this I have added an MD member of type struct vmmem to struct vmctx.
This means that now struct vmctx is public, instead of being opaque. One other approach would be to keep struct vmctx opaque and have each architecture define it, but that makes all functions that access struct vmctx (like vm_get_device_fd) machine-dependent.