Index: sys/amd64/include/hypercall.h
===================================================================
--- /dev/null
+++ sys/amd64/include/hypercall.h
@@ -0,0 +1,376 @@
+/*-
+ * Copyright (c) 2016 Domagoj Stolfa
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_HYPERCALL_H_
+#define _MACHINE_HYPERCALL_H_
+
+#include
+#include
+
+#include
+#include
+
+#define HYPERCALL_RET_NOT_IMPL	-2
+#define HYPERCALL_RET_ERROR	-1
+#define HYPERCALL_RET_SUCCESS	0
+
+#define VMCALL	".byte 0x0f,0x01,0xc1\n"
+#define VMMCALL	".byte 0x0f,0x01,0xd9\n"
+
+typedef struct hypercall_arg {
+	__uint64_t	len;
+	__uint64_t	val;
+} hc_arg_t;
+
+/*
+ * Index of every known hypercall.  The name of each enumerator
+ * should correspond to the function that is invoked once the
+ * hypercall is initiated.  Each enumerator must be assigned its
+ * number explicitly and the values must remain contiguous,
+ * because they are used to index the ring_plevel array.
+ *
+ * Keep in sync with ring_plevel.
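+ *
+ * Calling convention, as implemented by hypercall0()..hypercall6()
+ * below: the hypercall number is passed in %rax and the argument
+ * count in %rbx; each argument is pushed onto the guest stack as a
+ * {len, val} pair matching struct hypercall_arg, and the host
+ * returns a HYPERCALL_RET_* status in %rax.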
+ */ +enum hypercall_index { + HYPERCALL_DTRACE_PROBE_CREATE = 0, + HYPERCALL_DTRACE_PROBE = 1, + HYPERCALL_DTRACE_RESERVED1 = 2, /* Reserved for DTrace */ + HYPERCALL_DTRACE_RESERVED2 = 3, /* Reserved for DTrace */ + HYPERCALL_DTRACE_RESERVED3 = 4, /* Reserved for DTrace */ + HYPERCALL_DTRACE_RESERVED4 = 5, /* Reserved for DTrace */ + HYPERCALL_INDEX_MAX +}; + +static __inline __int64_t +hypercall0(__uint64_t c) +{ + const __uint64_t nargs = 0; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + VMCALL + : "=a"(ret) + : "a"(c), "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + VMMCALL + : "=a"(ret) + : "a"(c), "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall1(__uint64_t c, hc_arg_t *arg0) +{ + const __uint64_t nargs = 1; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $16, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $16, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall2(__uint64_t c, hc_arg_t *arg0, + hc_arg_t *arg1) +{ + const __uint64_t nargs = 2; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $32, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $32, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall3(__uint64_t c, hc_arg_t *arg0, + hc_arg_t *arg1, hc_arg_t *arg2) +{ + const __uint64_t nargs = 3; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $48, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $48, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall4(__uint64_t c, hc_arg_t *arg0, + hc_arg_t *arg1, hc_arg_t *arg2, + hc_arg_t *arg3) +{ + const __uint64_t nargs = 4; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $64, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val3] "r"(arg3->val), [len3] 
"r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $64, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val3] "r"(arg3->val), [len3] "r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall5(__uint64_t c, hc_arg_t *arg0, + hc_arg_t *arg1, hc_arg_t *arg2, + hc_arg_t *arg3, hc_arg_t *arg4) +{ + const __uint64_t nargs = 5; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val4]\n" + "push %[len4]\n" + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $80, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val4] "r"(arg4->val), [len4] "r"(arg4->len), + [val3] "r"(arg3->val), [len3] "r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val4]\n" + "push %[len4]\n" + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $80, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val4] "r"(arg4->val), [len4] "r"(arg4->len), + [val3] "r"(arg3->val), [len3] "r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } + return (ret); +} + +static __inline __int64_t +hypercall6(__uint64_t c, hc_arg_t *arg0, + hc_arg_t *arg1, hc_arg_t *arg2, + hc_arg_t *arg3, hc_arg_t *arg4, + hc_arg_t *arg5) +{ + const __uint64_t nargs = 6; + __int64_t ret; + if (cpu_vendor_id == CPU_VENDOR_INTEL) { + __asm __volatile( + "push %[val5]\n" + "push %[len5]\n" + "push %[val4]\n" + "push %[len4]\n" + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMCALL + "add $96, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val5] "r"(arg5->val), [len5] "r"(arg5->len), + [val4] "r"(arg4->val), [len4] "r"(arg4->len), + [val3] "r"(arg3->val), [len3] "r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } else { + __asm __volatile( + "push %[val5]\n" + "push %[len5]\n" + "push %[val4]\n" + "push %[len4]\n" + "push %[val3]\n" + "push %[len3]\n" + "push %[val2]\n" + "push %[len2]\n" + "push %[val1]\n" + "push %[len1]\n" + "push %[val0]\n" + "push %[len0]\n" + VMMCALL + "add $96, %%rsp\n" + : "=a"(ret) + : "a"(c), + [val5] "r"(arg5->val), [len5] "r"(arg5->len), + [val4] "r"(arg4->val), [len4] "r"(arg4->len), + [val3] "r"(arg3->val), [len3] "r"(arg3->len), + [val2] "r"(arg2->val), [len2] "r"(arg2->len), + [val1] "r"(arg1->val), [len1] "r"(arg1->len), + [val0] "r"(arg0->val), [len0] "r"(arg0->len), + "b"(nargs) + : "memory", "rsp"); + } 
+ return (ret); +} + +#endif /* _MACHINE_HYPERCALL_H_ */ Index: sys/amd64/include/vmm.h =================================================================== --- sys/amd64/include/vmm.h +++ sys/amd64/include/vmm.h @@ -533,6 +533,7 @@ VM_EXITCODE_MWAIT, VM_EXITCODE_SVM, VM_EXITCODE_REQIDLE, + VM_EXITCODE_HYPERCALL, VM_EXITCODE_MAX }; @@ -573,6 +574,10 @@ struct vm_guest_paging paging; }; +struct vm_hypercall { + struct vm_guest_paging paging; +}; + struct vm_exit { enum vm_exitcode exitcode; int inst_length; /* 0 means unknown */ @@ -636,7 +641,8 @@ struct { enum vm_suspend_how how; } suspended; - struct vm_task_switch task_switch; + struct vm_task_switch task_switch; + struct vm_hypercall hypercall; } u; }; Index: sys/amd64/vmm/amd/svm.c =================================================================== --- sys/amd64/vmm/amd/svm.c +++ sys/amd64/vmm/amd/svm.c @@ -158,7 +158,6 @@ static int svm_cleanup(void) { - smp_rendezvous(NULL, svm_disable, NULL, NULL); return (0); } @@ -469,6 +468,9 @@ svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); + /* Enable VMMCALL to be used for DTrace probes */ + svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL); + /* * From section "Canonicalization and Consistency Checks" in APMv2 * the VMRUN intercept bit must be set to pass the consistency check. @@ -849,6 +851,29 @@ vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); } +static void +svm_handle_hypercall(struct svm_softc *svm_sc, int vcpu, struct vmcb *vmcb, struct vm_exit *vmexit) +{ + struct vm_guest_paging *paging; + struct vmcb_segment seg; + uint64_t rsp; + int error; + + paging = &vmexit->u.hypercall.paging; + vmexit->exitcode = VM_EXITCODE_HYPERCALL; + + error = vmcb_read(svm_sc, vcpu, VM_REG_GUEST_RSP, + &rsp); + KASSERT(error == 0, ("%s: error %d getting RSP", + __func__, error)); + + error = vmcb_seg(vmcb, VM_REG_GUEST_SS, &seg); + KASSERT(error == 0, ("%s: error %d getting segment SS", + __func__, error)); + + svm_paging_info(vmcb, paging); +} + #ifdef KTR static const char * intrtype_to_str(int intr_type) @@ -1243,6 +1268,12 @@ return ("monitor"); case VMCB_EXIT_MWAIT: return ("mwait"); + case VMCB_EXIT_VMMCALL: + return ("vmmcall"); + case VMCB_EXIT_VMLOAD: + return ("vmload"); + case VMCB_EXIT_VMSAVE: + return ("vmsave"); default: snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); return (reasonbuf); @@ -1344,6 +1375,9 @@ case VMCB_EXIT_NMI: /* external NMI */ handled = 1; break; + case VMCB_EXIT_VMMCALL: + svm_handle_hypercall(svm_sc, vcpu, vmcb, vmexit); + break; case 0x40 ... 
0x5F:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
 		reflect = 1;
Index: sys/amd64/vmm/amd/vmcb.h
===================================================================
--- sys/amd64/vmm/amd/vmcb.h
+++ sys/amd64/vmm/amd/vmcb.h
@@ -139,6 +139,8 @@
 #define	VMCB_EXIT_IO			0x7B
 #define	VMCB_EXIT_MSR			0x7C
 #define	VMCB_EXIT_SHUTDOWN		0x7F
+#define	VMCB_EXIT_VMMCALL		0x81
+#define	VMCB_EXIT_VMLOAD		0x82
 #define	VMCB_EXIT_VMSAVE		0x83
 #define	VMCB_EXIT_MONITOR		0x8A
 #define	VMCB_EXIT_MWAIT			0x8B
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -2474,6 +2474,10 @@
 	case EXIT_REASON_MWAIT:
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
+	case EXIT_REASON_VMCALL:
+		vmexit->exitcode = VM_EXITCODE_HYPERCALL;
+		vmx_paging_info(&vmexit->u.hypercall.paging);
+		break;
 	default:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -224,6 +225,24 @@
     &trace_guest_exceptions, 0,
     "Trap into hypervisor on all guest exceptions and reflect them back");
+/*
+ * The maximum number of arguments currently supported
+ * by the hypercall interface in the VMM.  Hypercalls
+ * that pass more than HYPERCALL_MAX_ARGS arguments are
+ * rejected.
+ */
+#define	HYPERCALL_MAX_ARGS	6
+
+static int8_t ring_plevel[HYPERCALL_INDEX_MAX] = {
+	[HYPERCALL_DTRACE_PROBE_CREATE] = 0,
+	[HYPERCALL_DTRACE_PROBE] = 0,
+	[HYPERCALL_DTRACE_RESERVED1] = 0,	/* Reserved for DTrace */
+	[HYPERCALL_DTRACE_RESERVED2] = 0,	/* Reserved for DTrace */
+	[HYPERCALL_DTRACE_RESERVED3] = 0,	/* Reserved for DTrace */
+	[HYPERCALL_DTRACE_RESERVED4] = 0,	/* Reserved for DTrace */
+};
+
+
 static void vm_free_memmap(struct vm *vm, int ident);
 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
@@ -538,8 +557,9 @@
 	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
 		return (ENOMEM);
-	else
+	else {
 		return (0);
+	}
 }
 int
@@ -1507,6 +1527,149 @@
 	return (0);
 }
+static int
+hypercall_copy_arg(struct vm *vm, int vcpuid, uint64_t ds_base,
+    struct hypercall_arg *arg, struct vm_guest_paging *paging, void *dst)
+{
+	struct vm_copyinfo copyinfo[2];
+	uint64_t gla;
+	int error, fault;
+
+	gla = ds_base + arg->val;
+	error = vm_copy_setup(vm, vcpuid, paging, gla, arg->len,
+	    PROT_READ, copyinfo, nitems(copyinfo), &fault);
+	if (error || fault) {
+		return (error);
+	}
+
+	vm_copyin(vm, vcpuid, copyinfo, dst, arg->len);
+	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+
+	return (0);
+}
+
+static int
+vm_handle_hypercall(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
+{
+	struct vm_copyinfo copyinfo[2];
+	struct vm_guest_paging *paging;
+	struct hypercall_arg args[HYPERCALL_MAX_ARGS];
+	struct seg_desc ss_desc, cs_desc;
+	uint64_t hcid, nargs, rsp, stack_gla, cr0, rflags;
+	int error, fault, stackaddrsize, size, handled, addrsize;
+
+	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RAX, &hcid);
+	KASSERT(error == 0, ("%s: error %d getting RAX",
+	    __func__, error));
+	/*
+	 * Ensure that the hypercall number requested by the guest
+	 * never exceeds the number of hypercalls defined.
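+	 * An out-of-range number is rejected by returning
+	 * HYPERCALL_RET_ERROR to the guest in %rax.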
+	 */
+	if (hcid >= HYPERCALL_INDEX_MAX) {
+		error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, HYPERCALL_RET_ERROR);
+		KASSERT(error == 0, ("%s: error %d setting RAX",
+		    __func__, error));
+		return (0);
+	}
+
+	error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &cs_desc);
+	KASSERT(error == 0, ("%s: error %d getting CS descriptor",
+	    __func__, error));
+
+	/*
+	 * Ensure that the guest issued this hypercall from the
+	 * protection ring that ring_plevel requires for it.
+	 */
+	if (SEG_DESC_DPL(cs_desc.access) != ring_plevel[hcid]) {
+		error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, HYPERCALL_RET_ERROR);
+		KASSERT(error == 0, ("%s: error %d setting RAX",
+		    __func__, error));
+		return (0);
+	}
+
+	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RBX, &nargs);
+	KASSERT(error == 0, ("%s: error %d getting RBX",
+	    __func__, error));
+
+	if (nargs > HYPERCALL_MAX_ARGS) {
+		error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, HYPERCALL_RET_ERROR);
+		KASSERT(error == 0, ("%s: error %d setting RAX",
+		    __func__, error));
+		return (0);
+	}
+
+	handled = 0;
+	paging = &vmexit->u.hypercall.paging;
+	stackaddrsize = 8;
+	addrsize = 8;
+	size = sizeof(struct hypercall_arg);
+
+	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
+	KASSERT(error == 0, ("%s: error %d getting CR0",
+	    __func__, error));
+	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+	KASSERT(error == 0, ("%s: error %d getting RFLAGS",
+	    __func__, error));
+	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
+	KASSERT(error == 0, ("%s: error %d getting RSP",
+	    __func__, error));
+
+
+	error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
+	KASSERT(error == 0, ("%s: error %d getting SS descriptor",
+	    __func__, error));
+
+	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
+	    rsp, addrsize, stackaddrsize, PROT_READ, &stack_gla)) {
+		vm_inject_ss(vm, vcpuid, 0);
+		return (0);
+	}
+
+	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
+		vm_inject_ss(vm, vcpuid, 0);
+		return (0);
+	}
+
+	if (vie_alignment_check(paging->cpl, addrsize, cr0, rflags, stack_gla)) {
+		vm_inject_ac(vm, vcpuid, 0);
+		return (0);
+	}
+
+	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, nargs * size,
+	    PROT_READ, copyinfo, nitems(copyinfo), &fault);
+	if (error || fault) {
+		return (error);
+	}
+
+	vm_copyin(vm, vcpuid, copyinfo, args, nargs * size);
+	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+
+	/*
+	 * From this point on, all the arguments passed in from the
+	 * guest are contained in the args array.
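+	 * For now each known hypercall is a stub and the host simply
+	 * reports HYPERCALL_RET_NOT_IMPL back to the guest in %rax.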
+ */ + + switch (hcid) { + case HYPERCALL_DTRACE_PROBE_CREATE: + case HYPERCALL_DTRACE_PROBE: + case HYPERCALL_DTRACE_RESERVED1: + case HYPERCALL_DTRACE_RESERVED2: + case HYPERCALL_DTRACE_RESERVED3: + case HYPERCALL_DTRACE_RESERVED4: + error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, HYPERCALL_RET_NOT_IMPL); + KASSERT(error == 0, ("%s: error %d setting RAX", + __func__, error)); + break; + default: + error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, HYPERCALL_RET_NOT_IMPL); + KASSERT(error == 0, ("%s: error %d setting RAX", + __func__, error)); + break; + } + + return (0); +} + int vm_suspend(struct vm *vm, enum vm_suspend_how how) { @@ -1675,6 +1838,9 @@ case VM_EXITCODE_MWAIT: vm_inject_ud(vm, vcpuid); break; + case VM_EXITCODE_HYPERCALL: + error = vm_handle_hypercall(vm, vcpuid, vme, &retu); + break; default: retu = true; /* handled in userland */ break; @@ -1855,6 +2021,7 @@ /* Handle exceptions serially */ *retinfo = info2; } + return (1); } Index: sys/kern/subr_param.c =================================================================== --- sys/kern/subr_param.c +++ sys/kern/subr_param.c @@ -149,6 +149,7 @@ "hv", "vmware", "kvm", + "bhyve", NULL }; CTASSERT(nitems(vm_guest_sysctl_names) - 1 == VM_LAST); Index: sys/sys/systm.h =================================================================== --- sys/sys/systm.h +++ sys/sys/systm.h @@ -74,7 +74,7 @@ * Keep in sync with vm_guest_sysctl_names[]. */ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV, - VM_GUEST_VMWARE, VM_GUEST_KVM, VM_LAST }; + VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_LAST }; #if defined(WITNESS) || defined(INVARIANT_SUPPORT) void kassert_panic(const char *fmt, ...) __printflike(1, 2); Index: sys/x86/x86/identcpu.c =================================================================== --- sys/x86/x86/identcpu.c +++ sys/x86/x86/identcpu.c @@ -1288,6 +1288,8 @@ vm_guest = VM_GUEST_HV; else if (strcmp(hv_vendor, "KVMKVMKVM") == 0) vm_guest = VM_GUEST_KVM; + else if (strcmp(hv_vendor, "bhyve bhyve ") == 0) + vm_guest = VM_GUEST_BHYVE; } return; }
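
For reference, here is a minimal guest-side sketch of how the interface above is meant to be driven. The function name, the choice of a name buffer plus a payload buffer, and the interpretation of val as a guest virtual address are assumptions suggested by hypercall_copy_arg(); with this patch alone the host still answers every DTrace hypercall with HYPERCALL_RET_NOT_IMPL, so the call below only exercises the transport.

#include <sys/param.h>

#include <machine/hypercall.h>	/* header added above; <machine/...> on amd64 */

/* Hypothetical in-guest helper; not part of the patch itself. */
static int
guest_dtrace_probe_example(const char *probename, size_t namelen,
    void *payload, size_t paylen)
{
	hc_arg_t name_arg, data_arg;
	__int64_t ret;

	/* Each argument is a {len, val} pair; val carries a guest VA. */
	name_arg.len = namelen;
	name_arg.val = (__uint64_t)(uintptr_t)probename;
	data_arg.len = paylen;
	data_arg.val = (__uint64_t)(uintptr_t)payload;

	/*
	 * hypercall2() loads %rax with the hypercall number and %rbx
	 * with the argument count, pushes the two pairs onto the guest
	 * stack and executes VMCALL (Intel) or VMMCALL (AMD).
	 */
	ret = hypercall2(HYPERCALL_DTRACE_PROBE, &name_arg, &data_arg);

	return (ret == HYPERCALL_RET_SUCCESS ? 0 : (int)ret);
}

The {len, val} encoding is what lets the host pull variable-length buffers out of the guest via vm_copy_setup()/vm_copyin() instead of being limited to register-sized values.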