Index: head/emulators/xen-kernel411/Makefile
===================================================================
--- head/emulators/xen-kernel411/Makefile	(revision 483558)
+++ head/emulators/xen-kernel411/Makefile	(revision 483559)
@@ -1,116 +1,118 @@
 # $FreeBSD$

 PORTNAME=	xen
 PORTVERSION=	4.11.0
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	emulators
 MASTER_SITES=	http://downloads.xenproject.org/release/xen/${PORTVERSION}/
 PKGNAMESUFFIX=	-kernel411

 MAINTAINER=	royger@FreeBSD.org
 COMMENT=	Hypervisor using a microkernel design

 LICENSE=	GPLv2

 ONLY_FOR_ARCHS=	amd64

 USES=		cpe gmake python:2.7,build

 # Ports build environment has ARCH=amd64 set which disables Xen automatic arch
 # detection, but amd64 is not a valid arch for Xen. Hardcode x86_64 on the
 # command line in order to overwrite the one from the environment.
 MAKE_ARGS=	clang=y PYTHON=${PYTHON_CMD} ARCH=x86_64

 NO_MTREE=	yes

 STRIP=		#
 PLIST_FILES=	/boot/xen \
 		/boot/xen.4th \
 		lib/debug/boot/xen.debug

 # IOMMU fixes
 EXTRA_PATCHES=	${FILESDIR}/0001-pci-treat-class-0-devices-as-endpoints.patch:-p1
 # vPCI MSI fixes
 EXTRA_PATCHES+=	${FILESDIR}/0001-vpci-msi-split-code-to-bind-pirq.patch:-p1 \
 		${FILESDIR}/0002-vpci-msi-fix-update-of-bound-MSI-interrupts.patch:-p1
 # Add extra RAM regions to Dom0 memory map as UNUSABLE
 EXTRA_PATCHES+=	${FILESDIR}/0001-x86-dom0-add-extra-RAM-regions-as-UNUSABLE-for-PVH-m.patch:-p1
 # MTRR guest setup for PVH
 EXTRA_PATCHES+=	${FILESDIR}/0001-x86-mtrr-introduce-mask-to-get-VCNT-from-MTRRcap-MSR.patch:-p1 \
 		${FILESDIR}/0001-x86-HVM-improve-MTRR-load-checks.patch:-p1 \
 		${FILESDIR}/0002-x86-mtrr-split-enabled-field-into-two-boolean-flags.patch:-p1 \
 		${FILESDIR}/0003-hvm-mtrr-add-emacs-local-variables-block-with-format.patch:-p1 \
 		${FILESDIR}/0004-hvm-mtrr-use-the-hardware-number-of-variable-ranges-.patch:-p1 \
 		${FILESDIR}/0005-hvm-mtrr-copy-hardware-state-for-Dom0.patch:-p1 \
 		${FILESDIR}/0006-libxc-pvh-set-default-MTRR-type-to-write-back.patch:-p1 \
 		${FILESDIR}/0007-docs-pvh-document-initial-MTRR-state.patch:-p1
 # Build with lld (LLVM linker)
 EXTRA_PATCHES+=	${FILESDIR}/0001-x86-replace-usage-in-the-linker-script.patch:-p1 \
 		${FILESDIR}/0001-x86-efi-move-the-logic-to-detect-PE-build-support.patch:-p1 \
 		${FILESDIR}/0002-x86-efi-split-compiler-vs-linker-support.patch:-p1
 # Fix PVH Dom0 build with shadow paging
 EXTRA_PATCHES+=	${FILESDIR}/0001-x86-pvh-change-the-order-of-the-iommu-initialization.patch:-p1
 # XSA-269 (MSR_DEBUGCTL handling) and XSA-273 (L1TF)
 # Note that due to the high number of patches needed to fix L1TF the package is
 # brought up to the state of the staging-4.11 branch. This can be removed when
 # 4.11.1 is released.
 EXTRA_PATCHES+=	${FILESDIR}/0001-xen-Port-the-array_index_nospec-infrastructure-from-.patch:-p1 \
 		${FILESDIR}/0002-x86-correctly-set-nonlazy_xstate_used-when-loading-f.patch:-p1 \
 		${FILESDIR}/0003-x86-spec-ctrl-command-line-handling-adjustments.patch:-p1 \
 		${FILESDIR}/0005-mm-page_alloc-correct-first_dirty-calculations-durin.patch:-p1 \
 		${FILESDIR}/0006-allow-cpu_down-to-be-called-earlier.patch:-p1 \
 		${FILESDIR}/0007-x86-svm-Fixes-and-cleanup-to-svm_inject_event.patch:-p1 \
 		${FILESDIR}/0008-cpupools-fix-state-when-downing-a-CPU-failed.patch:-p1 \
 		${FILESDIR}/0009-x86-AMD-distinguish-compute-units-from-hyper-threads.patch:-p1 \
 		${FILESDIR}/0010-x86-distinguish-CPU-offlining-from-CPU-removal.patch:-p1 \
 		${FILESDIR}/0011-x86-possibly-bring-up-all-CPUs-even-if-not-all-are-s.patch:-p1 \
 		${FILESDIR}/0012-x86-command-line-option-to-avoid-use-of-secondary-hy.patch:-p1 \
 		${FILESDIR}/0013-x86-vmx-Don-t-clobber-dr6-while-debugging-state-is-l.patch:-p1 \
 		${FILESDIR}/0014-x86-xstate-Use-a-guests-CPUID-policy-rather-than-all.patch:-p1 \
 		${FILESDIR}/0015-x86-xstate-Make-errors-in-xstate-calculations-more-o.patch:-p1 \
 		${FILESDIR}/0016-x86-hvm-Disallow-unknown-MSR_EFER-bits.patch:-p1 \
 		${FILESDIR}/0017-x86-spec-ctrl-Fix-the-parsing-of-xpti-on-fixed-Intel.patch:-p1 \
 		${FILESDIR}/0018-x86-spec-ctrl-Yet-more-fixes-for-xpti-parsing.patch:-p1 \
 		${FILESDIR}/0019-x86-vmx-Fix-handing-of-MSR_DEBUGCTL-on-VMExit.patch:-p1 \
 		${FILESDIR}/0020-x86-vmx-Defer-vmx_vmcs_exit-as-long-as-possible-in-c.patch:-p1 \
 		${FILESDIR}/0021-x86-vmx-API-improvements-for-MSR-load-save-infrastru.patch:-p1 \
 		${FILESDIR}/0022-x86-vmx-Internal-cleanup-for-MSR-load-save-infrastru.patch:-p1 \
 		${FILESDIR}/0023-x86-vmx-Factor-locate_msr_entry-out-of-vmx_find_msr-.patch:-p1 \
 		${FILESDIR}/0024-x86-vmx-Support-remote-access-to-the-MSR-lists.patch:-p1 \
 		${FILESDIR}/0025-x86-vmx-Improvements-to-LBR-MSR-handling.patch:-p1 \
 		${FILESDIR}/0026-x86-vmx-Pass-an-MSR-value-into-vmx_msr_add.patch:-p1 \
 		${FILESDIR}/0027-x86-vmx-Support-load-only-guest-MSR-list-entries.patch:-p1 \
 		${FILESDIR}/0028-VMX-fix-vmx_-find-del-_msr-build.patch:-p1 \
 		${FILESDIR}/0029-ARM-disable-grant-table-v2.patch:-p1 \
 		${FILESDIR}/0030-x86-vtx-Fix-the-checking-for-unknown-invalid-MSR_DEB.patch:-p1 \
 		${FILESDIR}/0032-x86-spec-ctrl-Calculate-safe-PTE-addresses-for-L1TF-.patch:-p1 \
 		${FILESDIR}/0033-x86-spec-ctrl-Introduce-an-option-to-control-L1TF-mi.patch:-p1 \
 		${FILESDIR}/0034-x86-shadow-Infrastructure-to-force-a-PV-guest-into-s.patch:-p1 \
 		${FILESDIR}/0035-x86-mm-Plumbing-to-allow-any-PTE-update-to-fail-with.patch:-p1 \
 		${FILESDIR}/0036-x86-pv-Force-a-guest-into-shadow-mode-when-it-writes.patch:-p1 \
 		${FILESDIR}/0037-x86-spec-ctrl-CPUID-MSR-definitions-for-L1D_FLUSH.patch:-p1 \
 		${FILESDIR}/0038-x86-msr-Virtualise-MSR_FLUSH_CMD-for-guests.patch:-p1 \
 		${FILESDIR}/0039-x86-spec-ctrl-Introduce-an-option-to-control-L1D_FLU.patch:-p1 \
 		${FILESDIR}/0040-x86-Make-spec-ctrl-no-a-global-disable-of-all-mitiga.patch:-p1 \
 		${FILESDIR}/0042-x86-write-to-correct-variable-in-parse_pv_l1tf.patch:-p1
+# XSA-278: x86: Nested VT-x usable even when disabled
+EXTRA_PATCHES+=	${FILESDIR}/xsa278-4.11.patch:-p1

 .include <bsd.port.options.mk>

 .if ${OPSYS} != FreeBSD
 IGNORE=		only supported on FreeBSD
 .endif

 .if ${OSVERSION} < 1200074
 IGNORE=		only supported on recent FreeBSD HEAD
 .endif

 # The port's native 'build' target cannot be used because it sets
 # CFLAGS, and that breaks the Xen build system.
 do-build:
 	${MAKE_CMD} -j${MAKE_JOBS_NUMBER} -C ${WRKSRC} xen ${MAKE_ARGS}

 do-install:
 	${MKDIR} ${STAGEDIR}/boot
 	${MKDIR} ${STAGEDIR}${PREFIX}/lib/debug/boot/
 	${INSTALL_PROGRAM} ${WRKSRC}/xen/xen ${STAGEDIR}/boot
 	${INSTALL_DATA} ${FILESDIR}/xen.4th ${STAGEDIR}/boot
 	${INSTALL_DATA} ${WRKSRC}/xen/xen-syms ${STAGEDIR}${PREFIX}/lib/debug/boot/xen.debug

 .include <bsd.port.mk>

Index: head/emulators/xen-kernel411/files/xsa278-4.11.patch
===================================================================
--- head/emulators/xen-kernel411/files/xsa278-4.11.patch	(nonexistent)
+++ head/emulators/xen-kernel411/files/xsa278-4.11.patch	(revision 483559)
@@ -0,0 +1,326 @@
+From: Andrew Cooper
+Subject: x86/vvmx: Disallow the use of VT-x instructions when nested virt is disabled
+
+c/s ac6a4500b "vvmx: set vmxon_region_pa of vcpu out of VMX operation to an
+invalid address" was a real bugfix as described, but has a very subtle bug
+which results in all VT-x instructions being usable by a guest.
+
+The toolstack constructs a guest by issuing:
+
+    XEN_DOMCTL_createdomain
+    XEN_DOMCTL_max_vcpus
+
+and optionally later, HVMOP_set_param to enable nested virt.
+
+As a result, the call to nvmx_vcpu_initialise() in hvm_vcpu_initialise()
+(which is what makes the above patch look correct during review) is actually
+dead code. In practice, nvmx_vcpu_initialise() first gets called when nested
+virt is enabled, which is typically never.
+
+As a result, the zeroed memory of struct vcpu causes nvmx_vcpu_in_vmx() to
+return true before nested virt is enabled for the guest.
+
+Fixing the order of initialisation is a work in progress for other reasons,
+but not viable for security backports.
+
+A compounding factor is that the vmexit handlers for all instructions, other
+than VMXON, pass 0 into vmx_inst_check_privilege()'s vmxop_check parameter,
+which skips the CR4.VMXE check. (This is one of many reasons why nested virt
+isn't a supported feature yet.)
+
+However, the overall result is that when nested virt is not enabled by the
+toolstack (i.e. the default configuration for all production guests), the VT-x
+instructions (other than VMXON) are actually usable, and Xen very quickly
+falls over the fact that the nvmx structure is uninitialised.
+
+In order to fail safe in the supported case, re-implement all the VT-x
+instruction handling using a single function with a common prologue, covering
+all the checks which should cause #UD or #GP faults. This deliberately
+doesn't use any state from the nvmx structure, in case there are other lurking
+issues.
+
+This is XSA-278
+
+Reported-by: Sergey Dyasli
+Signed-off-by: Andrew Cooper
+Reviewed-by: Sergey Dyasli
+
+diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
+index a6415f0..a4d2829 100644
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -3982,57 +3982,17 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
+         break;
+
+     case EXIT_REASON_VMXOFF:
+-        if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMXON:
+-        if ( nvmx_handle_vmxon(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMCLEAR:
+-        if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMPTRLD:
+-        if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMPTRST:
+-        if ( nvmx_handle_vmptrst(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMREAD:
+-        if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMWRITE:
+-        if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMLAUNCH:
+-        if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_VMRESUME:
+-        if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_INVEPT:
+-        if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
+-            update_guest_eip();
+-        break;
+-
+     case EXIT_REASON_INVVPID:
+-        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
++        if ( nvmx_handle_vmx_insn(regs, exit_reason) == X86EMUL_OKAY )
+             update_guest_eip();
+         break;
+
+diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
+index e97db33..88cb58c 100644
+--- a/xen/arch/x86/hvm/vmx/vvmx.c
++++ b/xen/arch/x86/hvm/vmx/vvmx.c
+@@ -1470,7 +1470,7 @@ void nvmx_switch_guest(void)
+  * VMX instructions handling
+  */
+
+-int nvmx_handle_vmxon(struct cpu_user_regs *regs)
++static int nvmx_handle_vmxon(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v=current;
+     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+@@ -1522,7 +1522,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
++static int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v=current;
+     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+@@ -1611,7 +1611,7 @@ static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmresume(struct cpu_user_regs *regs)
++static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+ {
+     bool_t launched;
+     struct vcpu *v = current;
+@@ -1645,7 +1645,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+     return nvmx_vmresume(v,regs);
+ }
+
+-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
++static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+ {
+     bool_t launched;
+     struct vcpu *v = current;
+@@ -1688,7 +1688,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+     return rc;
+ }
+
+-int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
++static int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v = current;
+     struct vmx_inst_decoded decode;
+@@ -1759,7 +1759,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
++static int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v = current;
+     struct vmx_inst_decoded decode;
+@@ -1784,7 +1784,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmclear(struct cpu_user_regs *regs)
++static int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v = current;
+     struct vmx_inst_decoded decode;
+@@ -1836,7 +1836,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmread(struct cpu_user_regs *regs)
++static int nvmx_handle_vmread(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v = current;
+     struct vmx_inst_decoded decode;
+@@ -1878,7 +1878,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
++static int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
+ {
+     struct vcpu *v = current;
+     struct vmx_inst_decoded decode;
+@@ -1926,7 +1926,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_invept(struct cpu_user_regs *regs)
++static int nvmx_handle_invept(struct cpu_user_regs *regs)
+ {
+     struct vmx_inst_decoded decode;
+     unsigned long eptp;
+@@ -1954,7 +1954,7 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
+-int nvmx_handle_invvpid(struct cpu_user_regs *regs)
++static int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+ {
+     struct vmx_inst_decoded decode;
+     unsigned long vpid;
+@@ -1980,6 +1980,81 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+     return X86EMUL_OKAY;
+ }
+
++int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason)
++{
++    struct vcpu *curr = current;
++    int ret;
++
++    if ( !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) ||
++         !nestedhvm_enabled(curr->domain) ||
++         (vmx_guest_x86_mode(curr) < (hvm_long_mode_active(curr) ? 8 : 2)) )
++    {
++        hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
++        return X86EMUL_EXCEPTION;
++    }
++
++    if ( vmx_get_cpl() > 0 )
++    {
++        hvm_inject_hw_exception(TRAP_gp_fault, 0);
++        return X86EMUL_EXCEPTION;
++    }
++
++    switch ( exit_reason )
++    {
++    case EXIT_REASON_VMXOFF:
++        ret = nvmx_handle_vmxoff(regs);
++        break;
++
++    case EXIT_REASON_VMXON:
++        ret = nvmx_handle_vmxon(regs);
++        break;
++
++    case EXIT_REASON_VMCLEAR:
++        ret = nvmx_handle_vmclear(regs);
++        break;
++
++    case EXIT_REASON_VMPTRLD:
++        ret = nvmx_handle_vmptrld(regs);
++        break;
++
++    case EXIT_REASON_VMPTRST:
++        ret = nvmx_handle_vmptrst(regs);
++        break;
++
++    case EXIT_REASON_VMREAD:
++        ret = nvmx_handle_vmread(regs);
++        break;
++
++    case EXIT_REASON_VMWRITE:
++        ret = nvmx_handle_vmwrite(regs);
++        break;
++
++    case EXIT_REASON_VMLAUNCH:
++        ret = nvmx_handle_vmlaunch(regs);
++        break;
++
++    case EXIT_REASON_VMRESUME:
++        ret = nvmx_handle_vmresume(regs);
++        break;
++
++    case EXIT_REASON_INVEPT:
++        ret = nvmx_handle_invept(regs);
++        break;
++
++    case EXIT_REASON_INVVPID:
++        ret = nvmx_handle_invvpid(regs);
++        break;
++
++    default:
++        ASSERT_UNREACHABLE();
++        domain_crash(curr->domain);
++        ret = X86EMUL_UNHANDLEABLE;
++        break;
++    }
++
++    return ret;
++}
++
+ #define __emul_value(enable1, default1) \
+     ((enable1 | default1) << 32 | (default1))
+
+diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
+index 9ea35eb..fc4a8d1 100644
+--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
++++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
+@@ -94,9 +94,6 @@ void nvmx_domain_relinquish_resources(struct domain *d);
+
+ bool_t nvmx_ept_enabled(struct vcpu *v);
+
+-int nvmx_handle_vmxon(struct cpu_user_regs *regs);
+-int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
+-
+ #define EPT_TRANSLATE_SUCCEED       0
+ #define EPT_TRANSLATE_VIOLATION     1
+ #define EPT_TRANSLATE_MISCONFIG     2
+@@ -191,15 +188,7 @@ enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
+ uint64_t get_shadow_eptp(struct vcpu *v);
+
+ void nvmx_destroy_vmcs(struct vcpu *v);
+-int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
+-int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
+-int nvmx_handle_vmclear(struct cpu_user_regs *regs);
+-int nvmx_handle_vmread(struct cpu_user_regs *regs);
+-int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
+-int nvmx_handle_vmresume(struct cpu_user_regs *regs);
+-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
+-int nvmx_handle_invept(struct cpu_user_regs *regs);
+-int nvmx_handle_invvpid(struct cpu_user_regs *regs);
++int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
+ int nvmx_msr_read_intercept(unsigned int msr,
+                             u64 *msr_content);
+

Property changes on: head/emulators/xen-kernel411/files/xsa278-4.11.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
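The crux of xsa278-4.11.patch is the ordering inside the new nvmx_handle_vmx_insn() dispatcher: the common prologue injects #UD or #GP before any per-instruction handler, and therefore before any possibly-uninitialised nvmx state, can be reached. The stand-alone C sketch below mimics that fail-safe ordering with simplified stand-in types and handlers (fake_vcpu, handle_vmx_insn() and the two toy handlers are illustrative only, not Xen code, and the real guest-mode check is omitted); it compiles and runs as-is.

/*
 * Toy illustration of the fail-safe pattern used by xsa278-4.11.patch:
 * every VT-x instruction intercept funnels through one dispatcher whose
 * common prologue raises #UD/#GP before any per-instruction handler runs.
 * All types and helpers here are simplified stand-ins, not Xen's.
 */
#include <stdbool.h>
#include <stdio.h>

enum fault { FAULT_NONE, FAULT_UD, FAULT_GP };

struct fake_vcpu {
    bool cr4_vmxe;      /* guest CR4.VMXE */
    bool nested_virt;   /* nested virt enabled by the toolstack */
    unsigned int cpl;   /* current privilege level */
};

/* Stand-ins for the per-instruction handlers the real patch dispatches to. */
static enum fault handle_vmxon(void)  { puts("VMXON handled");  return FAULT_NONE; }
static enum fault handle_vmread(void) { puts("VMREAD handled"); return FAULT_NONE; }

enum vmx_insn { INSN_VMXON, INSN_VMREAD };

/* Mirrors the shape of nvmx_handle_vmx_insn(): common checks, then dispatch. */
static enum fault handle_vmx_insn(const struct fake_vcpu *v, enum vmx_insn insn)
{
    /* #UD unless CR4.VMXE is set and nested virt was enabled for the guest. */
    if ( !v->cr4_vmxe || !v->nested_virt )
        return FAULT_UD;

    /* #GP(0) when not executed at CPL 0. */
    if ( v->cpl > 0 )
        return FAULT_GP;

    switch ( insn )
    {
    case INSN_VMXON:  return handle_vmxon();
    case INSN_VMREAD: return handle_vmread();
    }
    return FAULT_UD;
}

int main(void)
{
    /* Default production guest: nested virt off -> every VT-x insn faults. */
    struct fake_vcpu guest = { .cr4_vmxe = true, .nested_virt = false, .cpl = 0 };

    printf("VMREAD with nested virt off: fault %d (expect 1, #UD)\n",
           handle_vmx_insn(&guest, INSN_VMREAD));

    guest.nested_virt = true;
    printf("VMREAD with nested virt on:  fault %d (expect 0, none)\n",
           handle_vmx_insn(&guest, INSN_VMREAD));
    return 0;
}

Compiling the sketch (cc -Wall sketch.c) and running it shows the VMREAD path faulting until nested virt is switched on, which is the behaviour the patch restores for the default, nested-virt-disabled configuration.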