Index: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c =================================================================== --- projects/bhyve_svm/sys/amd64/vmm/amd/svm.c (revision 271202) +++ projects/bhyve_svm/sys/amd64/vmm/amd/svm.c (revision 271203) @@ -1,1595 +1,1720 @@ /*- * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "vmm_lapic.h" #include "vmm_msr.h" #include "vmm_stat.h" #include "vmm_ktr.h" #include "vmm_ioport.h" #include "vatpic.h" #include "vlapic.h" #include "vlapic_priv.h" #include "x86.h" #include "vmcb.h" #include "svm.h" #include "svm_softc.h" #include "npt.h" /* * SVM CPUID function 0x8000_000A, edx bit decoding. */ #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ -#define AMD_CPUID_SVM_ASID_FLUSH BIT(6) /* Flush by ASID */ +#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ +#define VMCB_CACHE_DEFAULT \ + (VMCB_CACHE_ASID | VMCB_CACHE_IOPM | VMCB_CACHE_NP) + MALLOC_DEFINE(M_SVM, "svm", "svm"); MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); /* Per-CPU context area. */ extern struct pcpu __pcpu[]; static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit); static int svm_msr_rw_ok(uint8_t *btmap, uint64_t msr); static int svm_msr_rd_ok(uint8_t *btmap, uint64_t msr); static int svm_msr_index(uint64_t msr, int *index, int *bit); static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); -static uint32_t svm_feature; /* AMD SVM features. */ +static uint32_t svm_feature; /* AMD SVM features. */ -/* - * Starting guest ASID, 0 is reserved for host. 
- * Each guest will have its own unique ASID. - */ -static uint32_t guest_asid = 1; +/* Maximum ASIDs supported by the processor */ +static uint32_t nasid; -/* - * Max ASID processor can support. - * This limit the maximum number of virtual machines that can be created. - */ -static int max_asid; +/* Current ASID generation for each host cpu */ +static struct asid asid[MAXCPU]; /* * SVM host state saved area of size 4KB for each core. */ static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); /* * S/w saved host context. */ static struct svm_regctx host_ctx[MAXCPU]; static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO"); static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected"); /* * Common function to enable or disabled SVM for a CPU. */ static int cpu_svm_enable_disable(boolean_t enable) { uint64_t efer_msr; efer_msr = rdmsr(MSR_EFER); if (enable) efer_msr |= EFER_SVM; else efer_msr &= ~EFER_SVM; wrmsr(MSR_EFER, efer_msr); return(0); } /* * Disable SVM on a CPU. */ static void svm_disable(void *arg __unused) { (void)cpu_svm_enable_disable(FALSE); } /* * Disable SVM for all CPUs. */ static int svm_cleanup(void) { smp_rendezvous(NULL, svm_disable, NULL, NULL); return (0); } /* * Check for required BHyVe SVM features in a CPU. */ static int svm_cpuid_features(void) { u_int regs[4]; /* CPUID Fn8000_000A is for SVM */ do_cpuid(0x8000000A, regs); svm_feature = regs[3]; printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]); - max_asid = regs[1]; - + nasid = regs[1]; + KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); + printf("SVM Features:0x%b\n", svm_feature, "\020" "\001NP" /* Nested paging */ "\002LbrVirt" /* LBR virtualization */ "\003SVML" /* SVM lock */ "\004NRIPS" /* NRIP save */ "\005TscRateMsr" /* MSR based TSC rate control */ "\006VmcbClean" /* VMCB clean bits */ "\007FlushByAsid" /* Flush by ASID */ "\010DecodeAssist" /* Decode assist */ "\011" "\012" "\013PauseFilter" "\014" "\015PauseFilterThreshold" "\016AVIC" ); /* SVM Lock */ if (!(svm_feature & AMD_CPUID_SVM_SVML)) { printf("SVM is disabled by BIOS, please enable in BIOS.\n"); return (ENXIO); } /* * bhyve need RVI to work. */ if (!(svm_feature & AMD_CPUID_SVM_NP)) { printf("Missing Nested paging or RVI SVM support in processor.\n"); return (EIO); } if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE) return (0); return (EIO); } +static __inline int +flush_by_asid(void) +{ + return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); +} + /* * Enable SVM for a CPU. */ static void svm_enable(void *arg __unused) { uint64_t hsave_pa; (void)cpu_svm_enable_disable(TRUE); hsave_pa = vtophys(hsave[curcpu]); wrmsr(MSR_VM_HSAVE_PA, hsave_pa); if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) { panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu); } } /* * Check if a processor support SVM. */ static int is_svm_enabled(void) { uint64_t msr; /* Section 15.4 Enabling SVM from APM2. */ if ((amd_feature2 & AMDID2_SVM) == 0) { printf("SVM is not supported on this processor.\n"); return (ENXIO); } msr = rdmsr(MSR_VM_CR); /* Make sure SVM is not disabled by BIOS. */ if ((msr & VM_CR_SVMDIS) == 0) { return svm_cpuid_features(); } printf("SVM disabled by Key, consult TPM/BIOS manual.\n"); return (ENXIO); } /* * Enable SVM on CPU and initialize nested page table h/w. */ static int svm_init(int ipinum) { - int err; + int err, cpu; err = is_svm_enabled(); if (err) return (err); + for (cpu = 0; cpu < MAXCPU; cpu++) { + /* + * Initialize the host ASIDs to their "highest" valid values. 
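+ * For example, with a hypothetical nasid of 8 every host cpu starts
+ * out at asid[cpu] = { .gen = ~0UL, .num = 7 }.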
+ * + * The next ASID allocation will rollover both 'gen' and 'num' + * and start off the sequence at {1,1}. + */ + asid[cpu].gen = ~0UL; + asid[cpu].num = nasid - 1; + } svm_npt_init(ipinum); /* Start SVM on all CPUs */ smp_rendezvous(NULL, svm_enable, NULL, NULL); - + return (0); } static void svm_restore(void) { svm_enable(NULL); } /* * Get index and bit position for a MSR in MSR permission * bitmap. Two bits are used for each MSR, lower bit is * for read and higher bit is for write. */ static int svm_msr_index(uint64_t msr, int *index, int *bit) { uint32_t base, off; /* Pentium compatible MSRs */ #define MSR_PENTIUM_START 0 #define MSR_PENTIUM_END 0x1FFF /* AMD 6th generation and Intel compatible MSRs */ #define MSR_AMD6TH_START 0xC0000000UL #define MSR_AMD6TH_END 0xC0001FFFUL /* AMD 7th and 8th generation compatible MSRs */ #define MSR_AMD7TH_START 0xC0010000UL #define MSR_AMD7TH_END 0xC0011FFFUL *index = -1; *bit = (msr % 4) * 2; base = 0; if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { *index = msr / 4; return (0); } base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { off = (msr - MSR_AMD6TH_START); *index = (off + base) / 4; return (0); } base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { off = (msr - MSR_AMD7TH_START); *index = (off + base) / 4; return (0); } return (EIO); } /* * Give virtual cpu the complete access to MSR(read & write). */ static int svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) { int index, bit, err; err = svm_msr_index(msr, &index, &bit); if (err) { ERR("MSR 0x%lx is not writeable by guest.\n", msr); return (err); } if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { ERR("MSR 0x%lx index out of range(%d).\n", msr, index); return (EINVAL); } if (bit < 0 || bit > 8) { ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); return (EINVAL); } /* Disable intercept for read and write. */ if (read) perm_bitmap[index] &= ~(1UL << bit); if (write) perm_bitmap[index] &= ~(2UL << bit); CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", (perm_bitmap[index] >> bit) & 0x3, msr); return (0); } static int svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) { return svm_msr_perm(perm_bitmap, msr, true, true); } static int svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) { return svm_msr_perm(perm_bitmap, msr, true, false); } +static __inline void +vcpu_set_dirty(struct svm_vcpu *vcpustate, uint32_t dirtybits) +{ + vcpustate->dirty |= dirtybits; +} + /* * Initialise a virtual machine. */ static void * svm_vminit(struct vm *vm, pmap_t pmap) { struct svm_softc *svm_sc; struct svm_vcpu *vcpu; vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; - int i, error; + int i; - if (guest_asid >= max_asid) { - ERR("Host support max ASID:%d, can't create more guests.\n", - max_asid); - return (NULL); - } - svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO); svm_sc->vm = vm; svm_sc->svm_feature = svm_feature; svm_sc->vcpu_cnt = VM_MAXCPU; svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); + /* - * Each guest has its own unique ASID. - * ASID(Address Space Identifier) is used by TLB entry. - */ - svm_sc->asid = guest_asid++; - - /* * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. */ memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); /* * Following MSR can be completely controlled by virtual machines * since access to following are translated to access to VMCB. 
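 *
 * The permission bitmap encodes two bits per MSR: the lower bit
 * intercepts reads, the higher bit intercepts writes.  As a worked
 * example, svm_msr_index() maps MSR_GSBASE (0xc0000101) past the
 * 0x800 bytes covering the Pentium MSR range to byte index 0x840,
 * bit 2 (read) and bit 3 (write); svm_msr_perm() clears both bits
 * below to grant the guest direct access.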
*/ svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); /* For Nested Paging/RVI only. */ svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); /* Intercept access to all I/O ports. */ memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); /* Cache physical address for multiple vcpus. */ iopm_pa = vtophys(svm_sc->iopm_bitmap); msrpm_pa = vtophys(svm_sc->msr_bitmap); pml4_pa = svm_sc->nptp; for (i = 0; i < svm_sc->vcpu_cnt; i++) { vcpu = svm_get_vcpu(svm_sc, i); vcpu->lastcpu = NOCPU; vcpu->vmcb_pa = vtophys(&vcpu->vmcb); - error = svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa, - svm_sc->asid); - if (error) - goto cleanup; + svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa); } - return (svm_sc); - -cleanup: - free(svm_sc, M_SVM); - return (NULL); } static int svm_cpl(struct vmcb_state *state) { /* * From APMv2: * "Retrieve the CPL from the CPL field in the VMCB, not * from any segment DPL" */ return (state->cpl); } static enum vm_cpu_mode svm_vcpu_mode(struct vmcb *vmcb) { struct vmcb_segment *seg; struct vmcb_state *state; state = &vmcb->state; if (state->efer & EFER_LMA) { seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); /* * Section 4.8.1 for APM2, check if Code Segment has * Long attribute set in descriptor. */ if (seg->attrib & VMCB_CS_ATTRIB_L) return (CPU_MODE_64BIT); else return (CPU_MODE_COMPATIBILITY); } else if (state->cr0 & CR0_PE) { return (CPU_MODE_PROTECTED); } else { return (CPU_MODE_REAL); } } static enum vm_paging_mode svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) { if ((cr0 & CR0_PG) == 0) return (PAGING_MODE_FLAT); if ((cr4 & CR4_PAE) == 0) return (PAGING_MODE_32); if (efer & EFER_LME) return (PAGING_MODE_64); else return (PAGING_MODE_PAE); } /* * ins/outs utility routines */ static uint64_t svm_inout_str_index(struct svm_regctx *regs, int in) { uint64_t val; val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi; return (val); } static uint64_t svm_inout_str_count(struct svm_regctx *regs, int rep) { uint64_t val; val = rep ? 
regs->sctx_rcx : 1; return (val); } static void svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, int in, struct vm_inout_str *vis) { int error, s; if (in) { vis->seg_name = VM_REG_GUEST_ES; } else { /* The segment field has standard encoding */ s = (info1 >> 10) & 0x7; vis->seg_name = vm_segment_name(s); } error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); } static int svm_inout_str_addrsize(uint64_t info1) { uint32_t size; size = (info1 >> 7) & 0x7; switch (size) { case 1: return (2); /* 16 bit */ case 2: return (4); /* 32 bit */ case 4: return (8); /* 64 bit */ default: panic("%s: invalid size encoding %d", __func__, size); } } static void svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) { struct vmcb_state *state; state = &vmcb->state; paging->cr3 = state->cr3; paging->cpl = svm_cpl(state); paging->cpu_mode = svm_vcpu_mode(vmcb); paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, state->efer); } /* * Handle guest I/O intercept. */ static bool svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; struct svm_regctx *regs; struct vm_inout_str *vis; uint64_t info1; state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); regs = svm_get_guest_regctx(svm_sc, vcpu); info1 = ctrl->exitinfo1; vmexit->exitcode = VM_EXITCODE_INOUT; vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0; vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; vmexit->u.inout.bytes = (info1 >> 4) & 0x7; vmexit->u.inout.port = (uint16_t)(info1 >> 16); vmexit->u.inout.eax = (uint32_t)(state->rax); if (vmexit->u.inout.string) { vmexit->exitcode = VM_EXITCODE_INOUT_STR; vis = &vmexit->u.inout_str; svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); vis->rflags = state->rflags; vis->cr0 = state->cr0; vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); vis->addrsize = svm_inout_str_addrsize(info1); svm_inout_str_seginfo(svm_sc, vcpu, info1, vmexit->u.inout.in, vis); } return (false); } static int svm_npf_paging(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_W) return (VM_PROT_WRITE); return (VM_PROT_READ); } static bool svm_npf_emul_fault(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_ID) { return (false); } if (exitinfo1 & VMCB_NPF_INFO1_GPT) { return (false); } if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { return (false); } return (true); } static void svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) { struct vm_guest_paging *paging; struct vmcb_segment *seg; paging = &vmexit->u.inst_emul.paging; vmexit->exitcode = VM_EXITCODE_INST_EMUL; vmexit->u.inst_emul.gpa = gpa; vmexit->u.inst_emul.gla = VIE_INVALID_GLA; svm_paging_info(vmcb, paging); /* * If DecodeAssist SVM feature doesn't exist, we don't have NPF * instuction length. RIP will be calculated based on the length * determined by instruction emulation. */ vmexit->inst_length = VIE_INST_SIZE; seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); switch(paging->cpu_mode) { case CPU_MODE_PROTECTED: case CPU_MODE_COMPATIBILITY: /* * Section 4.8.1 of APM2, Default Operand Size or D bit. */ vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ? 1 : 0; break; default: vmexit->u.inst_emul.cs_d = 0; break; } } /* * Special handling of EFER MSR. 
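 *
 * A WRMSR arrives split across %edx:%eax, so the write path below
 * recombines it along the lines of
 *
 *	state->efer = ((uint64_t)edx << 32) | eax | EFER_SVM;
 *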
* SVM guest must have SVM EFER bit set, prohibit guest from cleareing SVM * enable bit in EFER. */ static void svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write) { struct svm_regctx *swctx; struct vmcb_state *state; state = svm_get_vmcb_state(svm_sc, vcpu); swctx = svm_get_guest_regctx(svm_sc, vcpu); if (write) { state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) | ((uint32_t)state->rax) | EFER_SVM; } else { state->rax = (uint32_t)state->efer; swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32); } } static void svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) { struct vmcb_ctrl *ctrl; uint64_t intinfo; ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); intinfo = ctrl->exitintinfo; if (!VMCB_EXITINTINFO_VALID(intinfo)) return; /* * From APMv2, Section "Intercepts during IDT interrupt delivery" * * If a #VMEXIT happened during event delivery then record the event * that was being delivered. */ VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); } /* * Determine the cause of virtual cpu exit and handle VMEXIT. * Return: false - Break vcpu execution loop and handle vmexit * in kernel or user space. * true - Continue vcpu run. */ static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) { struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct svm_regctx *ctx; uint64_t code, info1, info2, val; uint32_t eax, ecx, edx; bool update_rip, loop, retu; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); ctx = svm_get_guest_regctx(svm_sc, vcpu); code = ctrl->exitcode; info1 = ctrl->exitinfo1; info2 = ctrl->exitinfo2; update_rip = true; loop = true; vmexit->exitcode = VM_EXITCODE_VMX; vmexit->u.vmx.status = 0; KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " "injection valid bit is set %#lx", __func__, ctrl->eventinj)); svm_save_intinfo(svm_sc, vcpu); switch (code) { case VMCB_EXIT_MC: /* Machine Check. */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1); vmexit->exitcode = VM_EXITCODE_MTRAP; loop = false; break; case VMCB_EXIT_MSR: /* MSR access. */ eax = state->rax; ecx = ctx->sctx_rcx; edx = ctx->e.g.sctx_rdx; if (ecx == MSR_EFER) { VCPU_CTR0(svm_sc->vm, vcpu,"VMEXIT EFER\n"); svm_efer(svm_sc, vcpu, info1); break; } retu = false; if (info1) { /* VM exited because of write MSR */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); vmexit->exitcode = VM_EXITCODE_WRMSR; vmexit->u.msr.code = ecx; val = (uint64_t)edx << 32 | eax; if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) { vmexit->u.msr.wval = val; loop = false; } else loop = retu ? false : true; VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT WRMSR(%s handling) 0x%lx @0x%x", loop ? "kernel" : "user", val, ecx); } else { vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); vmexit->exitcode = VM_EXITCODE_RDMSR; vmexit->u.msr.code = ecx; if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) { loop = false; } else loop = retu ? false : true; VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR" " MSB=0x%08x, LSB=%08x @0x%x", ctx->e.g.sctx_rdx, state->rax, ecx); } #define MSR_AMDK8_IPM 0xc0010055 /* * We can't hide AMD C1E idle capability since its * based on CPU generation, for now ignore access to * this MSR by vcpus * XXX: special handling of AMD C1E - Ignore. 
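 * Treating a MSR_AMDK8_IPM access as handled (loop = true) keeps the
 * vcpu in its run loop instead of reflecting the access to userspace.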
*/ if (ecx == MSR_AMDK8_IPM) loop = true; break; case VMCB_EXIT_INTR: /* * Exit on External Interrupt. * Give host interrupt handler to run and if its guest * interrupt, local APIC will inject event in guest. */ update_rip = false; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt" " RIP:0x%lx.\n", state->rip); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); break; case VMCB_EXIT_IO: loop = svm_handle_io(svm_sc, vcpu, vmexit); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); break; case VMCB_EXIT_CPUID: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); (void)x86_emulate_cpuid(svm_sc->vm, vcpu, (uint32_t *)&state->rax, (uint32_t *)&ctx->sctx_rbx, (uint32_t *)&ctx->sctx_rcx, (uint32_t *)&ctx->e.g.sctx_rdx); VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n"); break; case VMCB_EXIT_HLT: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); if (ctrl->v_irq) { /* Interrupt is pending, can't halt guest. */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT_IGNORED, 1); VCPU_CTR0(svm_sc->vm, vcpu, "VMEXIT halt ignored."); } else { VCPU_CTR0(svm_sc->vm, vcpu, "VMEXIT halted CPU."); vmexit->exitcode = VM_EXITCODE_HLT; vmexit->u.hlt.rflags = state->rflags; loop = false; } break; case VMCB_EXIT_PAUSE: VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause"); vmexit->exitcode = VM_EXITCODE_PAUSE; vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); break; case VMCB_EXIT_NPF: loop = false; update_rip = false; if (info1 & VMCB_NPF_INFO1_RSV) { VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT" " reserved bit is set," "INFO1:0x%lx INFO2:0x%lx .\n", info1, info2); break; } /* EXITINFO2 has the physical fault address (GPA). */ if(vm_mem_allocated(svm_sc->vm, info2)) { VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging," "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", state->rip, info1, info2); vmexit->exitcode = VM_EXITCODE_PAGING; vmexit->u.paging.gpa = info2; vmexit->u.paging.fault_type = svm_npf_paging(info1); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); } else if (svm_npf_emul_fault(info1)) { VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF inst_emul," "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", state->rip, info1, info2); svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu), info2, vmexit); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); } break; case VMCB_EXIT_SHUTDOWN: VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown."); loop = false; break; case VMCB_EXIT_INVALID: VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID."); loop = false; break; default: /* Return to user space. */ loop = false; update_rip = false; VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx" " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n", ctrl->exitcode, info1, info2); VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx" " Inst decoder len:%d\n", state->rip, ctrl->nrip, ctrl->inst_decode_size); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); break; } vmexit->rip = state->rip; if (update_rip) { if (ctrl->nrip == 0) { VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set " "for RIP0x%lx.\n", state->rip); vmexit->exitcode = VM_EXITCODE_VMX; } else vmexit->rip = ctrl->nrip; } /* If vcpu execution is continued, update RIP. */ if (loop) { state->rip = vmexit->rip; } if (state->rip == 0) { VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n"); vmexit->exitcode = VM_EXITCODE_VMX; } return (loop); } /* * Inject NMI to virtual cpu. 
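 *
 * The return value reports whether the injection slot was consumed,
 * letting the caller prioritize NMIs over ordinary interrupts, e.g.:
 *
 *	if (svm_inject_nmi(svm_sc, vcpu))
 *		return;		/* NMI injected; interrupts must wait */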
*/ static int svm_inject_nmi(struct svm_softc *svm_sc, int vcpu) { struct vmcb_ctrl *ctrl; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); /* Can't inject another NMI if last one is pending.*/ if (!vm_nmi_pending(svm_sc->vm, vcpu)) return (0); /* Inject NMI, vector number is not used.*/ vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false); /* Acknowledge the request is accepted.*/ vm_nmi_clear(svm_sc->vm, vcpu); VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n"); return (1); } static void svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) { struct vmcb_ctrl *ctrl; uint64_t intinfo; ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) return; KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " "valid: %#lx", __func__, intinfo)); vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo), VMCB_EXITINTINFO_VECTOR(intinfo), VMCB_EXITINTINFO_EC(intinfo), VMCB_EXITINTINFO_EC_VALID(intinfo)); vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); } /* * Inject event to virtual cpu. */ static void svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; int extint_pending; int vector; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); svm_inj_intinfo(svm_sc, vcpu); /* Can't inject multiple events at once. */ if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj); return ; } /* Wait for guest to come out of interrupt shadow. */ if (ctrl->intr_shadow) { VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n"); return; } /* NMI event has priority over interrupts.*/ if (svm_inject_nmi(svm_sc, vcpu)) { return; } extint_pending = vm_extint_pending(svm_sc->vm, vcpu); if (!extint_pending) { /* Ask the local apic for a vector to inject */ if (!vlapic_pending_intr(vlapic, &vector)) return; } else { /* Ask the legacy pic for a vector to inject */ vatpic_pending_intr(svm_sc->vm, &vector); } if (vector < 32 || vector > 255) { VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection" "invalid vector=%d.\n", vector); ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector); return; } if ((state->rflags & PSL_I) == 0) { VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n"); return; } vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); if (!extint_pending) { /* Update the Local APIC ISR */ vlapic_intr_accepted(vlapic, vector); } else { vm_extint_clear(svm_sc->vm, vcpu); vatpic_intr_accepted(svm_sc->vm, vector); /* * XXX need to recheck exting_pending ala VT-x */ } VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector); } -static void +static __inline void restore_host_tss(void) { struct system_segment_descriptor *tss_sd; /* * The TSS descriptor was in use prior to launching the guest so it * has been marked busy. * * 'ltr' requires the descriptor to be marked available so change the * type to "64-bit available TSS". 
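 *
 * In descriptor terms, the code below flips sd_type from SDT_SYSBSY
 * (busy) back to SDT_SYSTSS (available) before executing 'ltr'.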
*/ tss_sd = PCPU_GET(tss); tss_sd->sd_type = SDT_SYSTSS; ltr(GSEL(GPROC0_SEL, SEL_KPL)); } +static void +check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) +{ + struct svm_vcpu *vcpustate; + struct vmcb_ctrl *ctrl; + long eptgen; + bool alloc_asid; + + KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " + "active on cpu %u", __func__, thiscpu)); + + vcpustate = svm_get_vcpu(sc, vcpuid); + ctrl = svm_get_vmcb_ctrl(sc, vcpuid); + + /* + * The TLB entries associated with the vcpu's ASID are not valid + * if either of the following conditions is true: + * + * 1. The vcpu's ASID generation is different than the host cpu's + * ASID generation. This happens when the vcpu migrates to a new + * host cpu. It can also happen when the number of vcpus executing + * on a host cpu is greater than the number of ASIDs available. + * + * 2. The pmap generation number is different than the value cached in + * the 'vcpustate'. This happens when the host invalidates pages + * belonging to the guest. + * + * asidgen eptgen Action + * mismatch mismatch + * 0 0 (a) + * 0 1 (b1) or (b2) + * 1 0 (c) + * 1 1 (d) + * + * (a) There is no mismatch in eptgen or ASID generation and therefore + * no further action is needed. + * + * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is + * retained and the TLB entries associated with this ASID + * are flushed by VMRUN. + * + * (b2) If the cpu does not support FlushByAsid then a new ASID is + * allocated. + * + * (c) A new ASID is allocated. + * + * (d) A new ASID is allocated. + */ + + alloc_asid = false; + eptgen = pmap->pm_eptgen; + ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; + + if (vcpustate->asid.gen != asid[thiscpu].gen) { + alloc_asid = true; /* (c) and (d) */ + } else if (vcpustate->eptgen != eptgen) { + if (flush_by_asid()) + ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ + else + alloc_asid = true; /* (b2) */ + } else { + /* + * This is the common case (a). + */ + KASSERT(!alloc_asid, ("ASID allocation not necessary")); + KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, + ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); + } + + if (alloc_asid) { + if (++asid[thiscpu].num >= nasid) { + asid[thiscpu].num = 1; + if (++asid[thiscpu].gen == 0) + asid[thiscpu].gen = 1; + /* + * If this cpu does not support "flush-by-asid" + * then flush the entire TLB on a generation + * bump. Subsequent ASID allocation in this + * generation can be done without a TLB flush. + */ + if (!flush_by_asid()) + ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; + } + vcpustate->asid.gen = asid[thiscpu].gen; + vcpustate->asid.num = asid[thiscpu].num; + + ctrl->asid = vcpustate->asid.num; + vcpu_set_dirty(vcpustate, VMCB_CACHE_ASID); + /* + * If this cpu supports "flush-by-asid" then the TLB + * was not flushed after the generation bump. The TLB + * is flushed selectively after every new ASID allocation. + */ + if (flush_by_asid()) + ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; + } + vcpustate->eptgen = eptgen; + + KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); + KASSERT(ctrl->asid == vcpustate->asid.num, + ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); +} + /* * Start vcpu with specified RIP. 
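 *
 * Each pass through the run loop below roughly follows this sequence
 * (a simplified sketch; the suspend/rendezvous/AST bailouts and the
 * MSR_GSBASE restore are elided):
 *
 *	disable_gintr();
 *	svm_inj_interrupts(svm_sc, vcpu, vlapic);
 *	CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);
 *	check_asid(svm_sc, vcpu, pmap, thiscpu);
 *	ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
 *	vcpustate->dirty = 0;
 *	svm_launch(vmcb_pa, gctx, hctx);	/* VMRUN ... #VMEXIT */
 *	CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
 *	restore_host_tss();
 *	enable_gintr();
 *	loop = svm_vmexit(svm_sc, vcpu, vmexit);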
*/ static int svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, void *rend_cookie, void *suspended_cookie) { struct svm_regctx *hctx, *gctx; struct svm_softc *svm_sc; struct svm_vcpu *vcpustate; struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct vm_exit *vmexit; struct vlapic *vlapic; struct vm *vm; uint64_t vmcb_pa; + u_int thiscpu; bool loop; /* Continue vcpu execution loop. */ loop = true; svm_sc = arg; vm = svm_sc->vm; vcpustate = svm_get_vcpu(svm_sc, vcpu); state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); vmexit = vm_exitinfo(vm, vcpu); vlapic = vm_lapic(vm, vcpu); + /* + * Stash 'curcpu' on the stack as 'thiscpu'. + * + * The per-cpu data area is not accessible until MSR_GSBASE is restored + * after the #VMEXIT. Since VMRUN is executed inside a critical section + * 'curcpu' and 'thiscpu' are guaranteed to identical. + */ + thiscpu = curcpu; + gctx = svm_get_guest_regctx(svm_sc, vcpu); - hctx = &host_ctx[curcpu]; + hctx = &host_ctx[thiscpu]; vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; - if (vcpustate->lastcpu != curcpu) { - /* Virtual CPU is running on a diiferent CPU now.*/ - vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); - + if (vcpustate->lastcpu != thiscpu) { /* - * Flush all TLB mappings for this guest on this CPU, - * it might have stale entries since vcpu has migrated - * or vmm is restarted. + * Force new ASID allocation by invalidating the generation. */ - ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; + vcpustate->asid.gen = 0; - /* Can't use any cached VMCB state by cpu.*/ - ctrl->vmcb_clean = VMCB_CACHE_NONE; - } else { /* - * XXX: Using same ASID for all vcpus of a VM will cause TLB - * corruption. This can easily be produced by muxing two vcpus - * on same core. - * For now, flush guest TLB for every vmrun. + * Invalidate the VMCB state cache by marking all fields dirty. */ - ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; - - /* - * This is the same cpu on which vcpu last ran so don't - * need to reload all VMCB state. - * ASID is unique for a guest. - * IOPM is unchanged. - * RVI/EPT is unchanged. + vcpu_set_dirty(vcpustate, 0xffffffff); + + /* + * XXX + * Setting 'vcpustate->lastcpu' here is bit premature because + * we may return from this function without actually executing + * the VMRUN instruction. This could happen if a rendezvous + * or an AST is pending on the first time through the loop. * + * This works for now but any new side-effects of vcpu + * migration should take this case into account. */ - ctrl->vmcb_clean = VMCB_CACHE_ASID | - VMCB_CACHE_IOPM | - VMCB_CACHE_NP; + vcpustate->lastcpu = thiscpu; + vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); } - vcpustate->lastcpu = curcpu; VCPU_CTR3(vm, vcpu, "SVM:Enter vmrun RIP:0x%lx" " inst len=%d/%d\n", rip, vmexit->inst_length, vmexit->u.inst_emul.vie.num_valid); /* Update Guest RIP */ state->rip = rip; - + do { vmexit->inst_length = 0; /* * Disable global interrupts to guarantee atomicity during * loading of guest state. This includes not only the state * loaded by the "vmrun" instruction but also software state * maintained by the hypervisor: suspended and rendezvous * state, NPT generation number, vlapic interrupts etc. 
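 *
 * "Global interrupts" refers to the GIF: disable_gintr() and
 * enable_gintr() map to the CLGI and STGI instructions (see "Global
 * Interrupt Flag" in APMv2).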
*/ disable_gintr(); if (vcpu_suspended(suspended_cookie)) { enable_gintr(); vm_exit_suspended(vm, vcpu, state->rip); break; } if (vcpu_rendezvous_pending(rend_cookie)) { enable_gintr(); vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; vmm_stat_incr(vm, vcpu, VMEXIT_RENDEZVOUS, 1); VCPU_CTR1(vm, vcpu, "SVM: VCPU rendezvous, RIP:0x%lx\n", state->rip); vmexit->rip = state->rip; break; } /* We are asked to give the cpu by scheduler. */ if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { enable_gintr(); vmexit->exitcode = VM_EXITCODE_BOGUS; vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1); VCPU_CTR1(vm, vcpu, "SVM: ASTPENDING, RIP:0x%lx\n", state->rip); vmexit->rip = state->rip; break; } svm_inj_interrupts(svm_sc, vcpu, vlapic); + /* Activate the nested pmap on 'thiscpu' */ + CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); + + /* + * Check the pmap generation and the ASID generation to + * ensure that the vcpu does not use stale TLB mappings. + */ + check_asid(svm_sc, vcpu, pmap, thiscpu); + + ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty; + vcpustate->dirty = 0; + /* Launch Virtual Machine. */ svm_launch(vmcb_pa, gctx, hctx); + CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); + /* * Restore MSR_GSBASE to point to the pcpu data area. * * Note that accesses done via PCPU_GET/PCPU_SET will work * only after MSR_GSBASE is restored. * * Also note that we don't bother restoring MSR_KGSBASE * since it is not used in the kernel and will be restored * when the VMRUN ioctl returns to userspace. */ - wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]); + wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); + KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", + thiscpu, curcpu)); /* * The host GDTR and IDTR is saved by VMRUN and restored * automatically on #VMEXIT. However, the host TSS needs * to be restored explicitly. */ restore_host_tss(); /* #VMEXIT disables interrupts so re-enable them here. */ enable_gintr(); /* Handle #VMEXIT and if required return to user space. */ loop = svm_vmexit(svm_sc, vcpu, vmexit); vcpustate->loop++; vmm_stat_incr(vm, vcpu, VMEXIT_COUNT, 1); } while (loop); return (0); } /* * Cleanup for virtual machine. */ static void svm_vmcleanup(void *arg) { struct svm_softc *svm_sc; svm_sc = arg; VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); free(svm_sc, M_SVM); } /* * Return pointer to hypervisor saved register state. */ static register_t * swctx_regptr(struct svm_regctx *regctx, int reg) { switch (reg) { case VM_REG_GUEST_RBX: return (®ctx->sctx_rbx); case VM_REG_GUEST_RCX: return (®ctx->sctx_rcx); case VM_REG_GUEST_RDX: return (®ctx->e.g.sctx_rdx); case VM_REG_GUEST_RDI: return (®ctx->e.g.sctx_rdi); case VM_REG_GUEST_RSI: return (®ctx->e.g.sctx_rsi); case VM_REG_GUEST_RBP: return (®ctx->sctx_rbp); case VM_REG_GUEST_R8: return (®ctx->sctx_r8); case VM_REG_GUEST_R9: return (®ctx->sctx_r9); case VM_REG_GUEST_R10: return (®ctx->sctx_r10); case VM_REG_GUEST_R11: return (®ctx->sctx_r11); case VM_REG_GUEST_R12: return (®ctx->sctx_r12); case VM_REG_GUEST_R13: return (®ctx->sctx_r13); case VM_REG_GUEST_R14: return (®ctx->sctx_r14); case VM_REG_GUEST_R15: return (®ctx->sctx_r15); default: ERR("Unknown register requested, reg=%d.\n", reg); break; } return (NULL); } /* * Interface to read guest registers. * This can be SVM h/w saved or hypervisor saved register. 
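 *
 * Both classes are reached through the same call, e.g.:
 *
 *	uint64_t rip, rbx;
 *
 *	svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RIP, &rip);  /* VMCB */
 *	svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RBX, &rbx);  /* swctx */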
*/ static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) { struct svm_softc *svm_sc; struct vmcb *vmcb; register_t *reg; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); vmcb = svm_get_vmcb(svm_sc, vcpu); if (vmcb_read(vmcb, ident, val) == 0) { return (0); } reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); if (reg != NULL) { *val = *reg; return (0); } ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); return (EINVAL); } /* * Interface to write to guest registers. * This can be SVM h/w saved or hypervisor saved register. */ static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val) { struct svm_softc *svm_sc; struct vmcb *vmcb; register_t *reg; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); vmcb = svm_get_vmcb(svm_sc, vcpu); if (vmcb_write(vmcb, ident, val) == 0) { return (0); } reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); - + if (reg != NULL) { *reg = val; return (0); } + + /* + * XXX deal with CR3 and invalidate TLB entries tagged with the + * vcpu's ASID. This needs to be treated differently depending on + * whether 'running' is true/false. + */ ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); return (EINVAL); } /* * Inteface to set various descriptors. */ static int svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc) { struct svm_softc *svm_sc; struct vmcb *vmcb; struct vmcb_segment *seg; uint16_t attrib; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); vmcb = svm_get_vmcb(svm_sc, vcpu); VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type); seg = vmcb_seg(vmcb, type); if (seg == NULL) { ERR("SVM_ERR:Unsupported segment type%d\n", type); return (EINVAL); } /* Map seg_desc access to VMCB attribute format.*/ attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF); VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n", type, desc->access, desc->limit); seg->attrib = attrib; seg->base = desc->base; seg->limit = desc->limit; return (0); } /* * Interface to get guest descriptor. */ static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc) { struct svm_softc *svm_sc; struct vmcb_segment *seg; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type); seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type); if (!seg) { ERR("SVM_ERR:Unsupported segment type%d\n", type); return (EINVAL); } /* Map seg_desc access to VMCB attribute format.*/ desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF); desc->base = seg->base; desc->limit = seg->limit; /* * VT-x uses bit 16 (Unusable) to indicate a segment that has been * loaded with a NULL segment selector. The 'desc->access' field is * interpreted in the VT-x format by the processor-independent code. * * SVM uses the 'P' bit to convey the same information so convert it * into the VT-x format. For more details refer to section * "Segment State in the VMCB" in APMv2. 
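 *
 * Concretely, the 'P' bit lands in bit 7 of the converted access
 * rights, so a segment that is not present reads back below as
 * (desc->access & 0x80) == 0 and is tagged with the VT-x unusable
 * bit (0x10000).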
*/ if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR) desc->access |= 0x80; /* CS and TR always present */ if (!(desc->access & 0x80)) desc->access |= 0x10000; /* Unusable segment */ return (0); } static int svm_setcap(void *arg, int vcpu, int type, int val) { struct svm_softc *svm_sc; struct vmcb_ctrl *ctrl; int ret = ENOENT; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); switch (type) { case VM_CAP_HALT_EXIT: if (val) ctrl->ctrl1 |= VMCB_INTCPT_HLT; else ctrl->ctrl1 &= ~VMCB_INTCPT_HLT; ret = 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_cap:Halt exit %s.\n", val ? "enabled": "disabled"); break; case VM_CAP_PAUSE_EXIT: if (val) ctrl->ctrl1 |= VMCB_INTCPT_PAUSE; else ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE; ret = 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_cap:Pause exit %s.\n", val ? "enabled": "disabled"); break; case VM_CAP_MTRAP_EXIT: if (val) ctrl->exception |= BIT(IDT_MC); else ctrl->exception &= ~BIT(IDT_MC); ret = 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_cap:MC exit %s.\n", val ? "enabled": "disabled"); break; case VM_CAP_UNRESTRICTED_GUEST: /* SVM doesn't need special capability for SMP.*/ VCPU_CTR0(svm_sc->vm, vcpu, "SVM:set_cap:Unrestricted " "always enabled.\n"); ret = 0; break; default: break; } return (ret); } static int svm_getcap(void *arg, int vcpu, int type, int *retval) { struct svm_softc *svm_sc; struct vmcb_ctrl *ctrl; svm_sc = arg; KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); switch (type) { case VM_CAP_HALT_EXIT: *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n", *retval ? "enabled": "disabled"); break; case VM_CAP_PAUSE_EXIT: *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n", *retval ? "enabled": "disabled"); break; case VM_CAP_MTRAP_EXIT: *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0; VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n", *retval ? "enabled": "disabled"); break; case VM_CAP_UNRESTRICTED_GUEST: VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n"); *retval = 1; break; default: break; } return (0); } static struct vlapic * svm_vlapic_init(void *arg, int vcpuid) { struct svm_softc *svm_sc; struct vlapic *vlapic; svm_sc = arg; vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); vlapic->vm = svm_sc->vm; vlapic->vcpuid = vcpuid; vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; vlapic_init(vlapic); return (vlapic); } static void svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) { vlapic_cleanup(vlapic); free(vlapic, M_SVM_VLAPIC); } struct vmm_ops vmm_ops_amd = { svm_init, svm_cleanup, svm_restore, svm_vminit, svm_vmrun, svm_vmcleanup, svm_getreg, svm_setreg, svm_getdesc, svm_setdesc, svm_getcap, svm_setcap, svm_npt_alloc, svm_npt_free, svm_vlapic_init, svm_vlapic_cleanup }; Index: projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h =================================================================== --- projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h (revision 271202) +++ projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h (revision 271203) @@ -1,118 +1,125 @@ /*- * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SVM_SOFTC_H_ #define _SVM_SOFTC_H_ #define SVM_IO_BITMAP_SIZE (3 * PAGE_SIZE) #define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE) +struct asid { + uint64_t gen; /* range is [1, ~0UL] */ + uint32_t num; /* range is [1, nasid - 1] */ +}; + /* * svm_vpcu contains SVM VMCB state and vcpu register state. */ struct svm_vcpu { - struct vmcb vmcb; /* hardware saved vcpu context */ - struct svm_regctx swctx; /* software saved vcpu context */ - uint64_t vmcb_pa; /* VMCB physical address */ - uint64_t loop; /* loop count for vcpu */ - int lastcpu; /* host cpu that the vcpu last ran on */ + struct vmcb vmcb; /* hardware saved vcpu context */ + struct svm_regctx swctx; /* software saved vcpu context */ + uint64_t vmcb_pa; /* VMCB physical address */ + uint64_t loop; /* loop count for vcpu */ + int lastcpu; /* host cpu that the vcpu last ran on */ + uint32_t dirty; /* state cache bits that must be cleared */ + long eptgen; /* pmap->pm_eptgen when the vcpu last ran */ + struct asid asid; } __aligned(PAGE_SIZE); /* * SVM softc, one per virtual machine. */ struct svm_softc { /* * IO permission map, VMCB.ctrl.iopm_base_pa should point to this. * If a bit is set, access to I/O port is intercepted. */ uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE]; /* * MSR permission bitmap, VMCB.ctrl.msrpm_base_pa should point to this. * Two bits are used for each MSR with the LSB used for read access * and the MSB used for write access. A value of '1' indicates that * the operation is intercepted. */ uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE]; uint8_t apic_page[VM_MAXCPU][PAGE_SIZE]; /* Nested Paging */ vm_offset_t nptp; /* Virtual machine pointer. */ struct vm *vm; /* Guest VCPU h/w and s/w context. 
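 * Note that struct svm_vcpu is page aligned so that the embedded VMCB
 * meets the 4KB alignment required of the VMCB physical address given
 * to VMRUN.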
*/ struct svm_vcpu vcpu[VM_MAXCPU]; uint32_t svm_feature; /* SVM features from CPUID.*/ - int asid; /* Guest Address Space Identifier */ int vcpu_cnt; /* number of VCPUs for this guest.*/ } __aligned(PAGE_SIZE); CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0); static __inline struct svm_vcpu * svm_get_vcpu(struct svm_softc *sc, int vcpu) { return (&(sc->vcpu[vcpu])); } static __inline struct vmcb * svm_get_vmcb(struct svm_softc *sc, int vcpu) { return (&(sc->vcpu[vcpu].vmcb)); } static __inline struct vmcb_state * svm_get_vmcb_state(struct svm_softc *sc, int vcpu) { return (&(sc->vcpu[vcpu].vmcb.state)); } static __inline struct vmcb_ctrl * svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu) { return (&(sc->vcpu[vcpu].vmcb.ctrl)); } static __inline struct svm_regctx * svm_get_guest_regctx(struct svm_softc *sc, int vcpu) { return (&(sc->vcpu[vcpu].swctx)); } void svm_dump_vmcb(struct svm_softc *svm_sc, int vcpu); #endif /* _SVM_SOFTC_H_ */ Index: projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c =================================================================== --- projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c (revision 271202) +++ projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c (revision 271203) @@ -1,386 +1,387 @@ /*- * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include "vmcb.h" #include "svm.h" /* * The VMCB aka Virtual Machine Control Block is a 4KB aligned page * in memory that describes the virtual machine. * * The VMCB contains: * - instructions or events in the guest to intercept * - control bits that modify execution environment of the guest * - guest processor state (e.g. general purpose registers) */ /* * Initialize SVM h/w context i.e. the VMCB control and saved state areas. 
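 *
 * svm_vminit() calls this once per vcpu, along the lines of:
 *
 *	svm_init_vmcb(&vcpu->vmcb, vtophys(svm_sc->iopm_bitmap),
 *	    vtophys(svm_sc->msr_bitmap), svm_sc->nptp);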
*/ -int +void svm_init_vmcb(struct vmcb *vmcb, uint64_t iopm_base_pa, uint64_t msrpm_base_pa, - uint64_t np_pml4, uint32_t asid) + uint64_t np_pml4) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; uint16_t cr_shadow; ctrl = &vmcb->ctrl; state = &vmcb->state; ctrl->iopm_base_pa = iopm_base_pa; ctrl->msrpm_base_pa = msrpm_base_pa; /* Enable nested paging */ ctrl->np_enable = 1; ctrl->n_cr3 = np_pml4; /* * Intercept accesses to the control registers that are not shadowed * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. */ cr_shadow = BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(8); ctrl->cr_write = ctrl->cr_read = ~cr_shadow; /* Intercept Machine Check exceptions. */ ctrl->exception = BIT(IDT_MC); /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ ctrl->ctrl1 = VMCB_INTCPT_IO | VMCB_INTCPT_MSR | VMCB_INTCPT_HLT | VMCB_INTCPT_CPUID | VMCB_INTCPT_INTR | VMCB_INTCPT_VINTR | VMCB_INTCPT_INIT | VMCB_INTCPT_NMI | VMCB_INTCPT_SMI | VMCB_INTCPT_FERR_FREEZE | VMCB_INTCPT_SHUTDOWN; /* * From section "Canonicalization and Consistency Checks" in APMv2 * the VMRUN intercept bit must be set to pass the consistency check. */ ctrl->ctrl2 = VMCB_INTCPT_VMRUN; - ctrl->asid = asid; + /* + * The ASID will be set to a non-zero value just before VMRUN. + */ + ctrl->asid = 0; /* * Section 15.21.1, Interrupt Masking in EFLAGS * Section 15.21.2, Virtualizing APIC.TPR * * This must be set for %rflag and %cr8 isolation of guest and host. */ ctrl->v_intr_masking = 1; /* Enable Last Branch Record aka LBR for debugging */ ctrl->lbr_virt_en = 1; state->dbgctl = BIT(0); /* EFER_SVM must always be set when the guest is executing */ state->efer = EFER_SVM; /* Set up the PAT to power-on state */ state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) | PAT_VALUE(2, PAT_UNCACHED) | PAT_VALUE(3, PAT_UNCACHEABLE) | PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) | PAT_VALUE(6, PAT_UNCACHED) | PAT_VALUE(7, PAT_UNCACHEABLE); - - return (0); } /* * Read from segment selector, control and general purpose register of VMCB. */ int vmcb_read(struct vmcb *vmcb, int ident, uint64_t *retval) { struct vmcb_state *state; struct vmcb_segment *seg; int err; state = &vmcb->state; err = 0; switch (ident) { case VM_REG_GUEST_CR0: *retval = state->cr0; break; case VM_REG_GUEST_CR2: *retval = state->cr2; break; case VM_REG_GUEST_CR3: *retval = state->cr3; break; case VM_REG_GUEST_CR4: *retval = state->cr4; break; case VM_REG_GUEST_DR7: *retval = state->dr7; break; case VM_REG_GUEST_EFER: *retval = state->efer; break; case VM_REG_GUEST_RAX: *retval = state->rax; break; case VM_REG_GUEST_RFLAGS: *retval = state->rflags; break; case VM_REG_GUEST_RIP: *retval = state->rip; break; case VM_REG_GUEST_RSP: *retval = state->rsp; break; case VM_REG_GUEST_CS: case VM_REG_GUEST_DS: case VM_REG_GUEST_ES: case VM_REG_GUEST_FS: case VM_REG_GUEST_GS: case VM_REG_GUEST_SS: case VM_REG_GUEST_GDTR: case VM_REG_GUEST_IDTR: case VM_REG_GUEST_LDTR: case VM_REG_GUEST_TR: seg = vmcb_seg(vmcb, ident); if (seg == NULL) { ERR("Invalid seg type %d\n", ident); err = EINVAL; break; } *retval = seg->selector; break; default: err = EINVAL; break; } return (err); } /* * Write to segment selector, control and general purpose register of VMCB. 
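 *
 * Some registers are sanitized on the way in; for example
 *
 *	vmcb_write(vmcb, VM_REG_GUEST_EFER, val);
 *
 * always ORs in EFER_SVM, since a guest EFER without it is illegal
 * VMCB state.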
*/ int vmcb_write(struct vmcb *vmcb, int ident, uint64_t val) { struct vmcb_state *state; struct vmcb_segment *seg; int err; state = &vmcb->state; err = 0; switch (ident) { case VM_REG_GUEST_CR0: state->cr0 = val; break; case VM_REG_GUEST_CR2: state->cr2 = val; break; case VM_REG_GUEST_CR3: state->cr3 = val; break; case VM_REG_GUEST_CR4: state->cr4 = val; break; case VM_REG_GUEST_DR7: state->dr7 = val; break; case VM_REG_GUEST_EFER: /* EFER_SVM must always be set when the guest is executing */ state->efer = val | EFER_SVM; break; case VM_REG_GUEST_RAX: state->rax = val; break; case VM_REG_GUEST_RFLAGS: state->rflags = val; break; case VM_REG_GUEST_RIP: state->rip = val; break; case VM_REG_GUEST_RSP: state->rsp = val; break; case VM_REG_GUEST_CS: case VM_REG_GUEST_DS: case VM_REG_GUEST_ES: case VM_REG_GUEST_FS: case VM_REG_GUEST_GS: case VM_REG_GUEST_SS: case VM_REG_GUEST_GDTR: case VM_REG_GUEST_IDTR: case VM_REG_GUEST_LDTR: case VM_REG_GUEST_TR: seg = vmcb_seg(vmcb, ident); if (seg == NULL) { ERR("Invalid segment type %d\n", ident); err = EINVAL; break; } seg->selector = val; break; default: err = EINVAL; } return (err); } /* * Return VMCB segment area. */ struct vmcb_segment * vmcb_seg(struct vmcb *vmcb, int type) { struct vmcb_state *state; struct vmcb_segment *seg; state = &vmcb->state; switch (type) { case VM_REG_GUEST_CS: seg = &state->cs; break; case VM_REG_GUEST_DS: seg = &state->ds; break; case VM_REG_GUEST_ES: seg = &state->es; break; case VM_REG_GUEST_FS: seg = &state->fs; break; case VM_REG_GUEST_GS: seg = &state->gs; break; case VM_REG_GUEST_SS: seg = &state->ss; break; case VM_REG_GUEST_GDTR: seg = &state->gdt; break; case VM_REG_GUEST_IDTR: seg = &state->idt; break; case VM_REG_GUEST_LDTR: seg = &state->ldt; break; case VM_REG_GUEST_TR: seg = &state->tr; break; default: seg = NULL; break; } return (seg); } /* * Inject an event to vcpu as described in section 15.20, "Event injection". */ void vmcb_eventinject(struct vmcb_ctrl *ctrl, int intr_type, int vector, uint32_t error, bool ec_valid) { KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event already pending %#lx", __func__, ctrl->eventinj)); KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", __func__, vector)); switch (intr_type) { case VMCB_EVENTINJ_TYPE_INTR: case VMCB_EVENTINJ_TYPE_NMI: case VMCB_EVENTINJ_TYPE_INTn: break; case VMCB_EVENTINJ_TYPE_EXCEPTION: if (vector >= 0 && vector <= 31 && vector != 2) break; /* FALLTHROUGH */ default: panic("%s: invalid intr_type/vector: %d/%d", __func__, intr_type, vector); } ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; if (ec_valid) { ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; ctrl->eventinj |= (uint64_t)error << 32; } } Index: projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h =================================================================== --- projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h (revision 271202) +++ projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h (revision 271203) @@ -1,288 +1,287 @@ /*- * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _VMCB_H_ #define _VMCB_H_ /* * Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15 * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B */ /* VMCB Control offset 0xC */ #define VMCB_INTCPT_INTR BIT(0) #define VMCB_INTCPT_NMI BIT(1) #define VMCB_INTCPT_SMI BIT(2) #define VMCB_INTCPT_INIT BIT(3) #define VMCB_INTCPT_VINTR BIT(4) #define VMCB_INTCPT_CR0_WRITE BIT(5) #define VMCB_INTCPT_IDTR_READ BIT(6) #define VMCB_INTCPT_GDTR_READ BIT(7) #define VMCB_INTCPT_LDTR_READ BIT(8) #define VMCB_INTCPT_TR_READ BIT(9) #define VMCB_INTCPT_IDTR_WRITE BIT(10) #define VMCB_INTCPT_GDTR_WRITE BIT(11) #define VMCB_INTCPT_LDTR_WRITE BIT(12) #define VMCB_INTCPT_TR_WRITE BIT(13) #define VMCB_INTCPT_RDTSC BIT(14) #define VMCB_INTCPT_RDPMC BIT(15) #define VMCB_INTCPT_PUSHF BIT(16) #define VMCB_INTCPT_POPF BIT(17) #define VMCB_INTCPT_CPUID BIT(18) #define VMCB_INTCPT_RSM BIT(19) #define VMCB_INTCPT_IRET BIT(20) #define VMCB_INTCPT_INTn BIT(21) #define VMCB_INTCPT_INVD BIT(22) #define VMCB_INTCPT_PAUSE BIT(23) #define VMCB_INTCPT_HLT BIT(24) #define VMCB_INTCPT_INVPG BIT(25) #define VMCB_INTCPT_INVPGA BIT(26) #define VMCB_INTCPT_IO BIT(27) #define VMCB_INTCPT_MSR BIT(28) #define VMCB_INTCPT_TASK_SWITCH BIT(29) #define VMCB_INTCPT_FERR_FREEZE BIT(30) #define VMCB_INTCPT_SHUTDOWN BIT(31) /* VMCB Control offset 0x10 */ #define VMCB_INTCPT_VMRUN BIT(0) #define VMCB_INTCPT_VMMCALL BIT(1) #define VMCB_INTCPT_VMLOAD BIT(2) #define VMCB_INTCPT_VMSAVE BIT(3) #define VMCB_INTCPT_STGI BIT(4) #define VMCB_INTCPT_CLGI BIT(5) #define VMCB_INTCPT_SKINIT BIT(6) #define VMCB_INTCPT_RDTSCP BIT(7) #define VMCB_INTCPT_ICEBP BIT(8) #define VMCB_INTCPT_WBINVD BIT(9) #define VMCB_INTCPT_MONITOR BIT(10) #define VMCB_INTCPT_MWAIT BIT(11) #define VMCB_INTCPT_MWAIT_ARMED BIT(12) #define VMCB_INTCPT_XSETBV BIT(13) /* VMCB TLB control */ #define VMCB_TLB_FLUSH_NOTHING 0 /* Flush nothing */ #define VMCB_TLB_FLUSH_ALL 1 /* Flush entire TLB */ #define VMCB_TLB_FLUSH_GUEST 3 /* Flush all guest entries */ #define VMCB_TLB_FLUSH_GUEST_NONGLOBAL 7 /* Flush guest non-PG entries */ /* VMCB state caching */ #define VMCB_CACHE_NONE 0 /* No caching */ #define VMCB_CACHE_I BIT(0) /* Cache vectors, TSC offset */ #define VMCB_CACHE_IOPM BIT(1) /* I/O and MSR permission */ #define VMCB_CACHE_ASID BIT(2) /* ASID */ #define VMCB_CACHE_TPR BIT(3) /* V_TPR to V_INTR_VECTOR */ #define VMCB_CACHE_NP BIT(4) /* Nested Paging */ #define VMCB_CACHE_CR BIT(5) /* CR0, CR3, CR4 & EFER */ #define VMCB_CACHE_DR BIT(6) /* Debug registers */ #define VMCB_CACHE_DT BIT(7) /* GDT/IDT */ #define VMCB_CACHE_SEG BIT(8) 
#define	VMCB_CACHE_CR2		BIT(9)	/* page fault address */
#define	VMCB_CACHE_LBR		BIT(10)	/* Last branch */
-
/* VMCB control event injection */
#define	VMCB_EVENTINJ_EC_VALID		BIT(11)	/* Error Code valid */
#define	VMCB_EVENTINJ_VALID		BIT(31)	/* Event valid */

/* Event types that can be injected */
#define	VMCB_EVENTINJ_TYPE_INTR		0
#define	VMCB_EVENTINJ_TYPE_NMI		2
#define	VMCB_EVENTINJ_TYPE_EXCEPTION	3
#define	VMCB_EVENTINJ_TYPE_INTn		4

/* VMCB exit code, APM vol2 Appendix C */
#define	VMCB_EXIT_MC			0x52
#define	VMCB_EXIT_INTR			0x60
#define	VMCB_EXIT_PUSHF			0x70
#define	VMCB_EXIT_POPF			0x71
#define	VMCB_EXIT_CPUID			0x72
#define	VMCB_EXIT_IRET			0x74
#define	VMCB_EXIT_PAUSE			0x77
#define	VMCB_EXIT_HLT			0x78
#define	VMCB_EXIT_IO			0x7B
#define	VMCB_EXIT_MSR			0x7C
#define	VMCB_EXIT_SHUTDOWN		0x7F
#define	VMCB_EXIT_VMSAVE		0x83
#define	VMCB_EXIT_NPF			0x400
#define	VMCB_EXIT_INVALID		-1

/*
 * Nested page fault.
 * Bit definitions to decode EXITINFO1.
 */
#define	VMCB_NPF_INFO1_P		BIT(0)	/* Nested page present. */
#define	VMCB_NPF_INFO1_W		BIT(1)	/* Access was write. */
#define	VMCB_NPF_INFO1_U		BIT(2)	/* Access was user access. */
#define	VMCB_NPF_INFO1_RSV		BIT(3)	/* Reserved bits present. */
#define	VMCB_NPF_INFO1_ID		BIT(4)	/* Code read. */

#define	VMCB_NPF_INFO1_GPA		BIT(32)	/* Guest physical address. */
#define	VMCB_NPF_INFO1_GPT		BIT(33)	/* Guest page table. */

/*
 * EXITINTINFO, Interrupt exit info for all intercepts.
 * Section 15.7.2, Intercepts during IDT Interrupt Delivery.
 */
#define	VMCB_EXITINTINFO_VECTOR(x)	((x) & 0xFF)
#define	VMCB_EXITINTINFO_TYPE(x)	(((x) >> 8) & 0x7)
#define	VMCB_EXITINTINFO_EC_VALID(x)	(((x) & BIT(11)) ? 1 : 0)
#define	VMCB_EXITINTINFO_VALID(x)	(((x) & BIT(31)) ? 1 : 0)
#define	VMCB_EXITINTINFO_EC(x)		(((x) >> 32) & 0xFFFFFFFF)

/* VMCB save state area segment format */
struct vmcb_segment {
	uint16_t	selector;
	uint16_t	attrib;
	uint32_t	limit;
	uint64_t	base;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_segment) == 16);

/* Code segment descriptor attribute in 12 bit format as saved by VMCB. */
#define	VMCB_CS_ATTRIB_L		BIT(9)	/* Long mode. */
#define	VMCB_CS_ATTRIB_D		BIT(10)	/* Operand size bit. */

/*
 * The VMCB is divided into two areas - the first one contains various
 * control bits including the intercept vector and the second one contains
 * the guest state.
 */

/* VMCB control area - padded up to 1024 bytes */
struct vmcb_ctrl {
	uint16_t	cr_read;	/* Offset 0, CR0-15 read/write */
	uint16_t	cr_write;
	uint16_t	dr_read;	/* Offset 4, DR0-DR15 */
	uint16_t	dr_write;
	uint32_t	exception;	/* Offset 8, bit mask for exceptions. */
	uint32_t	ctrl1;		/* Offset 0xC, intercept events1 */
	uint32_t	ctrl2;		/* Offset 0x10, intercept event2 */
	uint8_t		pad1[0x28];	/* Offsets 0x14-0x3B are reserved. */
	uint16_t	pause_filthresh; /* Offset 0x3C, PAUSE filter threshold */
	uint16_t	pause_filcnt;	/* Offset 0x3E, PAUSE filter count */
	uint64_t	iopm_base_pa;	/* 0x40: IOPM_BASE_PA */
	uint64_t	msrpm_base_pa;	/* 0x48: MSRPM_BASE_PA */
	uint64_t	tsc_offset;	/* 0x50: TSC_OFFSET */
	uint32_t	asid;		/* 0x58: Guest ASID */
	uint8_t		tlb_ctrl;	/* 0x5C: TLB_CONTROL */
	uint8_t		pad2[3];	/* 0x5D-0x5F: Reserved. */
	uint8_t		v_tpr;		/* 0x60: V_TPR, guest CR8 */
	uint8_t		v_irq:1;	/* Is virtual interrupt pending? */
	uint8_t		:7;		/* Padding */
	uint8_t		v_intr_prio:4;	/* 0x62: Priority for virtual interrupt. */
	uint8_t		v_ign_tpr:1;
	uint8_t		:3;
	uint8_t		v_intr_masking:1; /* Guest and host sharing of RFLAGS. */
	uint8_t		:7;
	uint8_t		v_intr_vector;	/* 0x65: Vector for virtual interrupt. */
	uint8_t		pad3[3];	/* Bits 64:40 Reserved. */
	uint64_t	intr_shadow:1;	/* 0x68: Interrupt shadow, section 15.2.1 APM2 */
	uint64_t	:63;
	uint64_t	exitcode;	/* 0x70, Exitcode */
	uint64_t	exitinfo1;	/* 0x78, EXITINFO1 */
	uint64_t	exitinfo2;	/* 0x80, EXITINFO2 */
	uint64_t	exitintinfo;	/* 0x88, Interrupt exit value. */
	uint64_t	np_enable:1;	/* 0x90, Nested paging enable. */
	uint64_t	:63;
	uint8_t		pad4[0x10];	/* 0x98-0xA7 reserved. */
	uint64_t	eventinj;	/* 0xA8, Event injection. */
	uint64_t	n_cr3;		/* 0xB0, Nested page table. */
	uint64_t	lbr_virt_en:1;	/* Enable LBR virtualization. */
	uint64_t	:63;
	uint32_t	vmcb_clean;	/* 0xC0: VMCB clean bits for caching */
	uint32_t	:32;		/* 0xC4: Reserved */
	uint64_t	nrip;		/* 0xC8: Guest nRIP (next RIP). */
	uint8_t		inst_decode_size; /* 0xD0: Instruction decode */
	uint8_t		inst_decode_bytes[15];
	uint8_t		padd6[0x320];
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_state {
	struct vmcb_segment	es;
	struct vmcb_segment	cs;
	struct vmcb_segment	ss;
	struct vmcb_segment	ds;
	struct vmcb_segment	fs;
	struct vmcb_segment	gs;
	struct vmcb_segment	gdt;
	struct vmcb_segment	ldt;
	struct vmcb_segment	idt;
	struct vmcb_segment	tr;
	uint8_t		pad1[0x2b];	/* Reserved: 0xA0-0xCA */
	uint8_t		cpl;
	uint8_t		pad2[4];
	uint64_t	efer;
	uint8_t		pad3[0x70];	/* Reserved: 0xd8-0x147 */
	uint64_t	cr4;
	uint64_t	cr3;		/* Guest CR3 */
	uint64_t	cr0;
	uint64_t	dr7;
	uint64_t	dr6;
	uint64_t	rflags;
	uint64_t	rip;
	uint8_t		pad4[0x58];	/* Reserved: 0x180-0x1D7 */
	uint64_t	rsp;
	uint8_t		pad5[0x18];	/* Reserved 0x1E0-0x1F7 */
	uint64_t	rax;
	uint64_t	star;
	uint64_t	lstar;
	uint64_t	cstar;
	uint64_t	sfmask;
	uint64_t	kernelgsbase;
	uint64_t	sysenter_cs;
	uint64_t	sysenter_esp;
	uint64_t	sysenter_eip;
	uint64_t	cr2;
	uint8_t		pad6[0x20];
	uint64_t	g_pat;
	uint64_t	dbgctl;
	uint64_t	br_from;
	uint64_t	br_to;
	uint64_t	int_from;
	uint64_t	int_to;
	uint8_t		pad7[0x968];	/* Reserved up to end of VMCB */
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl	ctrl;
	struct vmcb_state	state;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

-int svm_init_vmcb(struct vmcb *vmcb, uint64_t iopm_base_pa,
-		  uint64_t msrpm_base_pa, uint64_t np_pml4, uint32_t asid);
+void svm_init_vmcb(struct vmcb *vmcb, uint64_t iopm_base_pa,
+		   uint64_t msrpm_base_pa, uint64_t np_pml4);
int	vmcb_read(struct vmcb *vmcb, int ident, uint64_t *retval);
int	vmcb_write(struct vmcb *vmcb, int ident, uint64_t val);
struct vmcb_segment *vmcb_seg(struct vmcb *vmcb, int type);
void	vmcb_eventinject(struct vmcb_ctrl *ctrl, int type, int vector,
	    uint32_t error, bool ec_valid);

#endif /* _VMCB_H_ */
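/*
 * A minimal sketch (not part of this diff) of decoding EXITINTINFO after a
 * #VMEXIT and replaying the interrupted event, along the lines of APM2
 * section 15.7.2 cited above.  "reinject_pending_event_example" is a
 * hypothetical helper; real exit handling would validate the event type
 * and map the exit code before re-injecting.
 */
static void
reinject_pending_event_example(struct vmcb *vmcb)
{
	struct vmcb_ctrl *ctrl = &vmcb->ctrl;
	uint64_t intinfo = ctrl->exitintinfo;

	/* No event was cut short by this exit. */
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * Replay the original event on the next VMRUN.  Note that
	 * vmcb_eventinject() panics on event types it does not handle,
	 * so a robust caller checks VMCB_EXITINTINFO_TYPE() first.
	 */
	vmcb_eventinject(ctrl,
	    VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo) != 0);
}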
Index: projects/bhyve_svm/sys/sys/bitset.h
===================================================================
--- projects/bhyve_svm/sys/sys/bitset.h	(revision 271202)
+++ projects/bhyve_svm/sys/sys/bitset.h	(revision 271203)
@@ -1,175 +1,179 @@
/*-
 * Copyright (c) 2008, Jeffrey Roberson
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_BITSET_H_
#define	_SYS_BITSET_H_

#define	BIT_CLR(_s, n, p)						\
	((p)->__bits[__bitset_word(_s, n)] &= ~__bitset_mask((_s), (n)))

#define	BIT_COPY(_s, f, t)	(void)(*(t) = *(f))

#define	BIT_ISSET(_s, n, p)						\
	((((p)->__bits[__bitset_word(_s, n)] & __bitset_mask((_s), (n))) != 0))

#define	BIT_SET(_s, n, p)						\
	((p)->__bits[__bitset_word(_s, n)] |= __bitset_mask((_s), (n)))

#define	BIT_ZERO(_s, p) do {						\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		(p)->__bits[__i] = 0L;					\
} while (0)

#define	BIT_FILL(_s, p) do {						\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		(p)->__bits[__i] = -1L;					\
} while (0)

#define	BIT_SETOF(_s, n, p) do {					\
	BIT_ZERO(_s, p);						\
	(p)->__bits[__bitset_word(_s, n)] = __bitset_mask((_s), (n));	\
} while (0)

/* Is p empty. */
#define	BIT_EMPTY(_s, p) __extension__ ({				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		if ((p)->__bits[__i])					\
			break;						\
	__i == __bitset_words((_s));					\
})

/* Is p full set. */
#define	BIT_ISFULLSET(_s, p) __extension__ ({				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		if ((p)->__bits[__i] != (long)-1)			\
			break;						\
	__i == __bitset_words((_s));					\
})

/* Is c a subset of p. */
#define	BIT_SUBSET(_s, p, c) __extension__ ({				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		if (((c)->__bits[__i] &					\
		    (p)->__bits[__i]) !=				\
		    (c)->__bits[__i])					\
			break;						\
	__i == __bitset_words((_s));					\
})

/* Are there any common bits between p & c? */
#define	BIT_OVERLAP(_s, p, c) __extension__ ({				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		if (((c)->__bits[__i] &					\
		    (p)->__bits[__i]) != 0)				\
			break;						\
	__i != __bitset_words((_s));					\
})

/* Compare two sets, returns 0 if equal, 1 otherwise. */
#define	BIT_CMP(_s, p, c) __extension__ ({				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		if (((c)->__bits[__i] !=				\
		    (p)->__bits[__i]))					\
			break;						\
	__i != __bitset_words((_s));					\
})

#define	BIT_OR(_s, d, s) do {						\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		(d)->__bits[__i] |= (s)->__bits[__i];			\
} while (0)

#define	BIT_AND(_s, d, s) do {						\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		(d)->__bits[__i] &= (s)->__bits[__i];			\
} while (0)

#define	BIT_NAND(_s, d, s) do {						\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		(d)->__bits[__i] &= ~(s)->__bits[__i];			\
} while (0)

#define	BIT_CLR_ATOMIC(_s, n, p)					\
	atomic_clear_long(&(p)->__bits[__bitset_word(_s, n)],		\
	    __bitset_mask((_s), n))

#define	BIT_SET_ATOMIC(_s, n, p)					\
	atomic_set_long(&(p)->__bits[__bitset_word(_s, n)],		\
	    __bitset_mask((_s), n))

+#define	BIT_SET_ATOMIC_ACQ(_s, n, p)					\
+	atomic_set_acq_long(&(p)->__bits[__bitset_word(_s, n)],	\
+	    __bitset_mask((_s), n))
+
/* Convenience functions catering to special cases. */
#define	BIT_AND_ATOMIC(_s, d, s) do {					\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		atomic_clear_long(&(d)->__bits[__i],			\
		    ~(s)->__bits[__i]);					\
} while (0)

#define	BIT_OR_ATOMIC(_s, d, s) do {					\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		atomic_set_long(&(d)->__bits[__i],			\
		    (s)->__bits[__i]);					\
} while (0)

#define	BIT_COPY_STORE_REL(_s, f, t) do {				\
	__size_t __i;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++)		\
		atomic_store_rel_long(&(t)->__bits[__i],		\
		    (f)->__bits[__i]);					\
} while (0)

#define	BIT_FFS(_s, p) __extension__ ({					\
	__size_t __i;							\
	int __bit;							\
									\
	__bit = 0;							\
	for (__i = 0; __i < __bitset_words((_s)); __i++) {		\
		if ((p)->__bits[__i] != 0) {				\
			__bit = ffsl((p)->__bits[__i]);			\
			__bit += __i * _BITSET_BITS;			\
			break;						\
		}							\
	}								\
	__bit;								\
})

#endif /* !_SYS_BITSET_H_ */
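/*
 * A minimal sketch (not part of this diff) of how the bitset macros
 * compose: visit every bit set in a fixed-size set.  BIT_FFS() is 1-based
 * and returns 0 for an empty set, so the loop clears each bit as it goes.
 * "EXAMPLE_SETSIZE", "example_set", and "visit_all_bits_example" are
 * hypothetical names; the struct follows the BITSET_DEFINE() pattern from
 * sys/_bitset.h.
 */
#define	EXAMPLE_SETSIZE	128
BITSET_DEFINE(example_set, EXAMPLE_SETSIZE);

static void
visit_all_bits_example(struct example_set *set)
{
	int bit;

	/* Destructive iteration: operate on a copy if the set must survive. */
	while ((bit = BIT_FFS(EXAMPLE_SETSIZE, set)) != 0) {
		/* BIT_FFS() is 1-based; convert to a 0-based index. */
		BIT_CLR(EXAMPLE_SETSIZE, bit - 1, set);
		/* ... act on bit - 1 here ... */
	}
}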
Index: projects/bhyve_svm/sys/sys/cpuset.h
===================================================================
--- projects/bhyve_svm/sys/sys/cpuset.h	(revision 271202)
+++ projects/bhyve_svm/sys/sys/cpuset.h	(revision 271203)
@@ -1,139 +1,140 @@
/*-
 * Copyright (c) 2008, Jeffrey Roberson
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_CPUSET_H_
#define	_SYS_CPUSET_H_

#include <sys/_cpuset.h>
#include <sys/bitset.h>

#define	CPUSETBUFSIZ	((2 + sizeof(long) * 2) * _NCPUWORDS)

#define	CPU_CLR(n, p)			BIT_CLR(CPU_SETSIZE, n, p)
#define	CPU_COPY(f, t)			BIT_COPY(CPU_SETSIZE, f, t)
#define	CPU_ISSET(n, p)			BIT_ISSET(CPU_SETSIZE, n, p)
#define	CPU_SET(n, p)			BIT_SET(CPU_SETSIZE, n, p)
#define	CPU_ZERO(p)			BIT_ZERO(CPU_SETSIZE, p)
#define	CPU_FILL(p)			BIT_FILL(CPU_SETSIZE, p)
#define	CPU_SETOF(n, p)			BIT_SETOF(CPU_SETSIZE, n, p)
#define	CPU_EMPTY(p)			BIT_EMPTY(CPU_SETSIZE, p)
#define	CPU_ISFULLSET(p)		BIT_ISFULLSET(CPU_SETSIZE, p)
#define	CPU_SUBSET(p, c)		BIT_SUBSET(CPU_SETSIZE, p, c)
#define	CPU_OVERLAP(p, c)		BIT_OVERLAP(CPU_SETSIZE, p, c)
#define	CPU_CMP(p, c)			BIT_CMP(CPU_SETSIZE, p, c)
#define	CPU_OR(d, s)			BIT_OR(CPU_SETSIZE, d, s)
#define	CPU_AND(d, s)			BIT_AND(CPU_SETSIZE, d, s)
#define	CPU_NAND(d, s)			BIT_NAND(CPU_SETSIZE, d, s)
#define	CPU_CLR_ATOMIC(n, p)		BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
#define	CPU_SET_ATOMIC(n, p)		BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
+#define	CPU_SET_ATOMIC_ACQ(n, p)	BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
#define	CPU_AND_ATOMIC(n, p)		BIT_AND_ATOMIC(CPU_SETSIZE, n, p)
#define	CPU_OR_ATOMIC(d, s)		BIT_OR_ATOMIC(CPU_SETSIZE, d, s)
#define	CPU_COPY_STORE_REL(f, t)	BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
#define	CPU_FFS(p)			BIT_FFS(CPU_SETSIZE, p)

/*
 * Valid cpulevel_t values.
 */
#define	CPU_LEVEL_ROOT		1	/* All system cpus. */
#define	CPU_LEVEL_CPUSET	2	/* Available cpus for which. */
#define	CPU_LEVEL_WHICH		3	/* Actual mask/id for which. */

/*
 * Valid cpuwhich_t values.
 */
#define	CPU_WHICH_TID		1	/* Specifies a thread id. */
#define	CPU_WHICH_PID		2	/* Specifies a process id. */
#define	CPU_WHICH_CPUSET	3	/* Specifies a set id. */
#define	CPU_WHICH_IRQ		4	/* Specifies an irq #. */
#define	CPU_WHICH_JAIL		5	/* Specifies a jail id. */

/*
 * Reserved cpuset identifiers.
 */
#define	CPUSET_INVALID	-1
#define	CPUSET_DEFAULT	0

#ifdef _KERNEL
LIST_HEAD(setlist, cpuset);

/*
 * cpusets encapsulate cpu binding information for one or more threads.
 *
 * a - Accessed with atomics.
 * s - Set at creation, never modified.  Only a ref required to read.
 * c - Locked internally by a cpuset lock.
 *
 * The bitmask is only modified while holding the cpuset lock.  It may be
 * read while only a reference is held but the consumer must be prepared
 * to deal with inconsistent results.
 */
struct cpuset {
	cpuset_t		cs_mask;	/* bitmask of valid cpus. */
	volatile u_int		cs_ref;		/* (a) Reference count. */
	int			cs_flags;	/* (s) Flags from below. */
	cpusetid_t		cs_id;		/* (s) Id or INVALID. */
	struct cpuset		*cs_parent;	/* (s) Pointer to our parent. */
	LIST_ENTRY(cpuset)	cs_link;	/* (c) All identified sets. */
	LIST_ENTRY(cpuset)	cs_siblings;	/* (c) Sibling set link. */
	struct setlist		cs_children;	/* (c) List of children. */
};

#define	CPU_SET_ROOT	0x0001	/* Set is a root set. */
#define	CPU_SET_RDONLY	0x0002	/* No modification allowed. */
extern cpuset_t *cpuset_root;
struct prison;
struct proc;

struct cpuset *cpuset_thread0(void);
struct cpuset *cpuset_ref(struct cpuset *);
void	cpuset_rel(struct cpuset *);
int	cpuset_setthread(lwpid_t id, cpuset_t *);
int	cpuset_setithread(lwpid_t id, u_char cpu);
int	cpuset_create_root(struct prison *, struct cpuset **);
int	cpuset_setproc_update_set(struct proc *, struct cpuset *);
char	*cpusetobj_strprint(char *, const cpuset_t *);
int	cpusetobj_strscan(cpuset_t *, const char *);
#ifdef DDB
void	ddb_display_cpuset(const cpuset_t *);
#endif

#else
__BEGIN_DECLS
int	cpuset(cpusetid_t *);
int	cpuset_setid(cpuwhich_t, id_t, cpusetid_t);
int	cpuset_getid(cpulevel_t, cpuwhich_t, id_t, cpusetid_t *);
int	cpuset_getaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, cpuset_t *);
int	cpuset_setaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, const cpuset_t *);
__END_DECLS
#endif
#endif /* !_SYS_CPUSET_H_ */
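/*
 * A minimal sketch (not part of this diff) of why the new acquire variant
 * exists: a CPU publishes its membership in a shared set such that none of
 * its subsequent memory operations can be reordered before the update.
 * "active_cpus_example" and "join_active_set_example" are hypothetical
 * names, not taken from the bhyve SVM code.
 */
static cpuset_t active_cpus_example;

static void
join_active_set_example(int cpu)
{
	/*
	 * atomic_set_acq_long() underneath gives this update acquire
	 * semantics; with plain CPU_SET_ATOMIC() later loads on this CPU
	 * could be reordered ahead of joining the set.
	 */
	CPU_SET_ATOMIC_ACQ(cpu, &active_cpus_example);

	/* Loads issued after this point observe the set membership. */
}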