D4049: xen: apply XSAs
D4049.diff (17 KB)
Index: head/emulators/xen-kernel/Makefile
===================================================================
--- head/emulators/xen-kernel/Makefile
+++ head/emulators/xen-kernel/Makefile
@@ -3,6 +3,7 @@
PORTNAME= xen
PKGNAMESUFFIX= -kernel
PORTVERSION= 4.5.1
+PORTREVISION= 1
CATEGORIES= emulators
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${PORTVERSION}/
@@ -30,7 +31,12 @@
${FILESDIR}/0005-x86-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch:-p2 \
${FILESDIR}/0006-xen-pvh-enable-mmu_update-hypercall.patch:-p2 \
${FILESDIR}/0007-iommu-fix-usage-of-shared-EPT-IOMMU-page-tables-on-P.patch:-p2 \
- ${FILESDIR}/0008-xen-arm-mm-Do-not-dump-the-p2m-when-mapping-a-foreig.patch:-p2
+ ${FILESDIR}/0008-xen-arm-mm-Do-not-dump-the-p2m-when-mapping-a-foreig.patch:-p2 \
+ ${FILESDIR}/xsa148-4.5.patch:-p2 \
+ ${FILESDIR}/xsa149.patch:-p2 \
+ ${FILESDIR}/xsa150.patch:-p2 \
+ ${FILESDIR}/xsa151.patch:-p2 \
+ ${FILESDIR}/xsa152-4.5.patch:-p2
.include <bsd.port.options.mk>
Index: head/emulators/xen-kernel/files/xsa148-4.5.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa148-4.5.patch
+++ head/emulators/xen-kernel/files/xsa148-4.5.patch
@@ -0,0 +1,39 @@
+x86: guard against undue super page PTE creation
+
+When optional super page support got added (commit bd1cd81d64 "x86: PV
+support for hugepages"), two adjustments were missed: mod_l2_entry()
+needs to consider the PSE and RW bits when deciding whether to use the
+fast path, and the PSE bit must not be removed from L2_DISALLOW_MASK
+unconditionally.
+
+This is XSA-148.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -162,7 +162,10 @@ static void put_superpage(unsigned long
+ static uint32_t base_disallow_mask;
+ /* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
+ #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
+-#define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
++
++#define L2_DISALLOW_MASK (unlikely(opt_allow_superpage) \
++ ? base_disallow_mask & ~_PAGE_PSE \
++ : base_disallow_mask)
+
+ #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ? \
+ base_disallow_mask : \
+@@ -1770,7 +1773,10 @@ static int mod_l2_entry(l2_pgentry_t *pl
+ }
+
+ /* Fast path for identical mapping and presence. */
+- if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
++ if ( !l2e_has_changed(ol2e, nl2e,
++ unlikely(opt_allow_superpage)
++ ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
++ : _PAGE_PRESENT) )
+ {
+ adjust_guest_l2e(nl2e, d);
+ if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
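
The mask logic this patch introduces can be illustrated standalone. Below is a minimal C sketch (BASE_DISALLOW and the option plumbing are hypothetical stand-ins, not Xen's actual definitions) of why _PAGE_PSE may only leave the disallow mask when superpage support was explicitly enabled:

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PSE      (1u << 7)
    /* Hypothetical disallow mask; Xen computes base_disallow_mask at boot. */
    #define BASE_DISALLOW  (0xffc00000u | _PAGE_PSE)

    static int opt_allow_superpage;   /* stand-in for the command-line option */

    static uint32_t l2_disallow_mask(void)
    {
        /* Only permit PSE when superpage support was actually requested. */
        return opt_allow_superpage ? (BASE_DISALLOW & ~_PAGE_PSE)
                                   : BASE_DISALLOW;
    }

    int main(void)
    {
        uint32_t guest_l2e = _PAGE_PSE;   /* guest tries to create a 2M PTE */

        for (opt_allow_superpage = 0; opt_allow_superpage <= 1;
             ++opt_allow_superpage)
            printf("allow_superpage=%d: %s\n", opt_allow_superpage,
                   (guest_l2e & l2_disallow_mask()) ? "rejected" : "allowed");
        return 0;
    }

The second hunk closes the matching hole in mod_l2_entry(): with superpages enabled, PSE and RW changes must also take the slow, fully validated path rather than the identical-mapping fast path.
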
Index: head/emulators/xen-kernel/files/xsa149.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa149.patch
+++ head/emulators/xen-kernel/files/xsa149.patch
@@ -0,0 +1,20 @@
+xen: free domain's vcpu array
+
+This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
+guest").
+
+This is XSA-149.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Ian Campbell <ian.campbell@citrix.com>
+
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -841,6 +841,7 @@ static void complete_domain_destroy(stru
+
+ xsm_free_security_domain(d);
+ free_cpumask_var(d->domain_dirty_cpumask);
++ xfree(d->vcpu);
+ free_domain_struct(d);
+
+ send_global_virq(VIRQ_DOM_EXC);
Index: head/emulators/xen-kernel/files/xsa150.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa150.patch
+++ head/emulators/xen-kernel/files/xsa150.patch
@@ -0,0 +1,201 @@
+x86/PoD: Eager sweep for zeroed pages
+
+Based on the contents of a guest's physical address space,
+p2m_pod_emergency_sweep() could degrade into a linear memcmp() from 0 to
+max_gfn, which runs non-preemptibly.
+
+As p2m_pod_emergency_sweep() runs behind the scenes in a number of contexts,
+making it preemptible is not feasible.
+
+Instead, a different approach is taken. Recently-populated pages are eagerly
+checked for reclamation, which amortises the p2m_pod_emergency_sweep()
+operation across each p2m_pod_demand_populate() operation.
+
+Note that in the case that a 2M superpage can't be reclaimed as a superpage,
+it is shattered if 4K pages of zeros can be reclaimed. This is unfortunate
+but matches the previous behaviour, and is required to avoid regressions
+(domain crash from PoD exhaustion) with VMs configured close to the limit.
+
+This is CVE-2015-7970 / XSA-150.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -920,28 +920,6 @@ p2m_pod_zero_check(struct p2m_domain *p2
+ }
+
+ #define POD_SWEEP_LIMIT 1024
+-
+-/* When populating a new superpage, look at recently populated superpages
+- * hoping that they've been zeroed. This will snap up zeroed pages as soon as
+- * the guest OS is done with them. */
+-static void
+-p2m_pod_check_last_super(struct p2m_domain *p2m, unsigned long gfn_aligned)
+-{
+- unsigned long check_gfn;
+-
+- ASSERT(p2m->pod.last_populated_index < POD_HISTORY_MAX);
+-
+- check_gfn = p2m->pod.last_populated[p2m->pod.last_populated_index];
+-
+- p2m->pod.last_populated[p2m->pod.last_populated_index] = gfn_aligned;
+-
+- p2m->pod.last_populated_index =
+- ( p2m->pod.last_populated_index + 1 ) % POD_HISTORY_MAX;
+-
+- p2m_pod_zero_check_superpage(p2m, check_gfn);
+-}
+-
+-
+ #define POD_SWEEP_STRIDE 16
+ static void
+ p2m_pod_emergency_sweep(struct p2m_domain *p2m)
+@@ -982,7 +960,7 @@ p2m_pod_emergency_sweep(struct p2m_domai
+ * NB that this is a zero-sum game; we're increasing our cache size
+ * by re-increasing our 'debt'. Since we hold the pod lock,
+ * (entry_count - count) must remain the same. */
+- if ( p2m->pod.count > 0 && i < limit )
++ if ( i < limit && (p2m->pod.count > 0 || hypercall_preempt_check()) )
+ break;
+ }
+
+@@ -994,6 +972,58 @@ p2m_pod_emergency_sweep(struct p2m_domai
+
+ }
+
++static void pod_eager_reclaim(struct p2m_domain *p2m)
++{
++ struct pod_mrp_list *mrp = &p2m->pod.mrp;
++ unsigned int i = 0;
++
++ /*
++ * Always check one page for reclamation.
++ *
++ * If the PoD pool is empty, keep checking until some space is found, or
++ * all entries have been exhausted.
++ */
++ do
++ {
++ unsigned int idx = (mrp->idx + i++) % ARRAY_SIZE(mrp->list);
++ unsigned long gfn = mrp->list[idx];
++
++ if ( gfn != INVALID_GFN )
++ {
++ if ( gfn & POD_LAST_SUPERPAGE )
++ {
++ gfn &= ~POD_LAST_SUPERPAGE;
++
++ if ( p2m_pod_zero_check_superpage(p2m, gfn) == 0 )
++ {
++ unsigned int x;
++
++ for ( x = 0; x < SUPERPAGE_PAGES; ++x, ++gfn )
++ p2m_pod_zero_check(p2m, &gfn, 1);
++ }
++ }
++ else
++ p2m_pod_zero_check(p2m, &gfn, 1);
++
++ mrp->list[idx] = INVALID_GFN;
++ }
++
++ } while ( (p2m->pod.count == 0) && (i < ARRAY_SIZE(mrp->list)) );
++}
++
++static void pod_eager_record(struct p2m_domain *p2m,
++ unsigned long gfn, unsigned int order)
++{
++ struct pod_mrp_list *mrp = &p2m->pod.mrp;
++
++ ASSERT(mrp->list[mrp->idx] == INVALID_GFN);
++ ASSERT(gfn != INVALID_GFN);
++
++ mrp->list[mrp->idx++] =
++ gfn | (order == PAGE_ORDER_2M ? POD_LAST_SUPERPAGE : 0);
++ mrp->idx %= ARRAY_SIZE(mrp->list);
++}
++
+ int
+ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned int order,
+@@ -1034,6 +1064,8 @@ p2m_pod_demand_populate(struct p2m_domai
+ return 0;
+ }
+
++ pod_eager_reclaim(p2m);
++
+ /* Only sweep if we're actually out of memory. Doing anything else
+ * causes unnecessary time and fragmentation of superpages in the p2m. */
+ if ( p2m->pod.count == 0 )
+@@ -1070,6 +1102,8 @@ p2m_pod_demand_populate(struct p2m_domai
+ p2m->pod.entry_count -= (1 << order);
+ BUG_ON(p2m->pod.entry_count < 0);
+
++ pod_eager_record(p2m, gfn_aligned, order);
++
+ if ( tb_init_done )
+ {
+ struct {
+@@ -1085,12 +1119,6 @@ p2m_pod_demand_populate(struct p2m_domai
+ __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
+ }
+
+- /* Check the last guest demand-populate */
+- if ( p2m->pod.entry_count > p2m->pod.count
+- && (order == PAGE_ORDER_2M)
+- && (q & P2M_ALLOC) )
+- p2m_pod_check_last_super(p2m, gfn_aligned);
+-
+ pod_unlock(p2m);
+ return 0;
+ out_of_memory:
+--- a/xen/arch/x86/mm/p2m.c
++++ b/xen/arch/x86/mm/p2m.c
+@@ -58,6 +58,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
+ /* Init the datastructures for later use by the p2m code */
+ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
+ {
++ unsigned int i;
+ int ret = 0;
+
+ mm_rwlock_init(&p2m->lock);
+@@ -73,6 +74,9 @@ static int p2m_initialise(struct domain
+
+ p2m->np2m_base = P2M_BASE_EADDR;
+
++ for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
++ p2m->pod.mrp.list[i] = INVALID_GFN;
++
+ if ( hap_enabled(d) && cpu_has_vmx )
+ ret = ept_p2m_init(p2m);
+ else
+--- a/xen/include/asm-x86/p2m.h
++++ b/xen/include/asm-x86/p2m.h
+@@ -292,10 +292,20 @@ struct p2m_domain {
+ entry_count; /* # of pages in p2m marked pod */
+ unsigned long reclaim_single; /* Last gpfn of a scan */
+ unsigned long max_guest; /* gpfn of max guest demand-populate */
+-#define POD_HISTORY_MAX 128
+- /* gpfn of last guest superpage demand-populated */
+- unsigned long last_populated[POD_HISTORY_MAX];
+- unsigned int last_populated_index;
++
++ /*
++ * Tracking of the most recently populated PoD pages, for eager
++ * reclamation.
++ */
++ struct pod_mrp_list {
++#define NR_POD_MRP_ENTRIES 32
++
++/* Encode ORDER_2M superpage in top bit of GFN */
++#define POD_LAST_SUPERPAGE (INVALID_GFN & ~(INVALID_GFN >> 1))
++
++ unsigned long list[NR_POD_MRP_ENTRIES];
++ unsigned int idx;
++ } mrp;
+ mm_lock_t lock; /* Locking of private pod structs, *
+ * not relying on the p2m lock. */
+ } pod;
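
The replacement mechanism is a small ring buffer of most-recently-populated GFNs. A simplified standalone C sketch of the ring mechanics follows (the superpage flag and the real zero-check are omitted; the printf stands in for p2m_pod_zero_check()):

    #include <stdio.h>

    #define NR_MRP_ENTRIES 32
    #define INVALID_GFN    (~0UL)

    struct mrp_list {
        unsigned long list[NR_MRP_ENTRIES];
        unsigned int idx;                 /* next slot to overwrite */
    };

    static void mrp_init(struct mrp_list *mrp)
    {
        for (unsigned int i = 0; i < NR_MRP_ENTRIES; i++)
            mrp->list[i] = INVALID_GFN;
        mrp->idx = 0;
    }

    /* Record a just-populated GFN in the oldest slot. */
    static void mrp_record(struct mrp_list *mrp, unsigned long gfn)
    {
        mrp->list[mrp->idx++] = gfn;
        mrp->idx %= NR_MRP_ENTRIES;
    }

    /* Check one recorded GFN; the real code calls p2m_pod_zero_check()
     * here and keeps going while the PoD pool remains empty. */
    static void mrp_reclaim(struct mrp_list *mrp, unsigned int i)
    {
        unsigned int idx = (mrp->idx + i) % NR_MRP_ENTRIES;

        if (mrp->list[idx] != INVALID_GFN) {
            printf("zero-check gfn %#lx\n", mrp->list[idx]);
            mrp->list[idx] = INVALID_GFN;
        }
    }

    int main(void)
    {
        struct mrp_list mrp;

        mrp_init(&mrp);
        mrp_record(&mrp, 0x1000);
        mrp_record(&mrp, 0x1200);

        /* One populate's worth of eager reclaim, scanning from the oldest. */
        for (unsigned int i = 0; i < NR_MRP_ENTRIES; i++)
            mrp_reclaim(&mrp, i);
        return 0;
    }

Each demand-populate records one GFN and checks at least one, so the scan for zeroed pages is spread across p2m_pod_demand_populate() calls instead of accumulating into a single non-preemptible sweep.
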
Index: head/emulators/xen-kernel/files/xsa151.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa151.patch
+++ head/emulators/xen-kernel/files/xsa151.patch
@@ -0,0 +1,28 @@
+xenoprof: free domain's vcpu array
+
+This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
+guest").
+
+This is XSA-151.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Ian Campbell <ian.campbell@citrix.com>
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -239,6 +239,7 @@ static int alloc_xenoprof_struct(
+ d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
+ if ( d->xenoprof->rawbuf == NULL )
+ {
++ xfree(d->xenoprof->vcpu);
+ xfree(d->xenoprof);
+ d->xenoprof = NULL;
+ return -ENOMEM;
+@@ -286,6 +287,7 @@ void free_xenoprof_pages(struct domain *
+ free_xenheap_pages(x->rawbuf, order);
+ }
+
++ xfree(x->vcpu);
+ xfree(x);
+ d->xenoprof = NULL;
+ }
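
XSA-149 and XSA-151 plug the same leak in two places: the vcpu array is a nested allocation that must be freed both on the constructor's error path and in the destructor. A minimal standalone sketch of the pattern, using plain malloc/free in place of Xen's xmalloc/xfree:

    #include <errno.h>
    #include <stdlib.h>

    struct prof {
        void *rawbuf;
        int  *vcpu;      /* nested allocation, easy to forget */
    };

    static int prof_alloc(struct prof **out, size_t nvcpu, size_t bufsz)
    {
        struct prof *p = calloc(1, sizeof(*p));

        if (p == NULL)
            return -ENOMEM;
        p->vcpu = calloc(nvcpu, sizeof(*p->vcpu));
        if (p->vcpu == NULL) {
            free(p);
            return -ENOMEM;
        }
        p->rawbuf = malloc(bufsz);
        if (p->rawbuf == NULL) {
            free(p->vcpu);   /* the hunk above adds exactly this unwind */
            free(p);
            return -ENOMEM;
        }
        *out = p;
        return 0;
    }

    static void prof_free(struct prof *p)
    {
        if (p == NULL)
            return;
        free(p->rawbuf);
        free(p->vcpu);       /* ... and this one in the destructor */
        free(p);
    }

    int main(void)
    {
        struct prof *p;

        if (prof_alloc(&p, 4, 4096) == 0)
            prof_free(p);
        return 0;
    }
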
Index: head/emulators/xen-kernel/files/xsa152-4.5.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa152-4.5.patch
+++ head/emulators/xen-kernel/files/xsa152-4.5.patch
@@ -0,0 +1,41 @@
+x86: rate-limit logging in do_xen{oprof,pmu}_op()
+
+Some of the sub-ops are accessible to all guests, and hence should be
+rate-limited. In the xenoprof case, just like for XSA-146, include them
+only in debug builds. Since the vPMU code is rather new, allow them to
+be always present, but downgrade them to (rate limited) guest messages.
+
+This is XSA-152.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -676,15 +676,13 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
+
+ if ( (op < 0) || (op > XENOPROF_last_op) )
+ {
+- printk("xenoprof: invalid operation %d for domain %d\n",
+- op, current->domain->domain_id);
++ gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op);
+ return -EINVAL;
+ }
+
+ if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
+ {
+- printk("xenoprof: dom %d denied privileged operation %d\n",
+- current->domain->domain_id, op);
++ gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op);
+ return -EPERM;
+ }
+
+@@ -907,8 +905,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
+ spin_unlock(&xenoprof_lock);
+
+ if ( ret < 0 )
+- printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
+- op, current->domain->domain_id, ret);
++ gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret);
+
+ return ret;
+ }
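
For the xenoprof paths, gdprintk() makes these messages debug-only; the vPMU half of the upstream fix does not apply here, so this 4.5 backport touches only xenoprof. As a standalone illustration of the underlying idea (not Xen's implementation), a guest-triggerable log path can be throttled with a simple time-window counter:

    #include <stdio.h>
    #include <time.h>

    #define LOG_BURST    10   /* messages allowed per window */
    #define LOG_WINDOW_S 5    /* window length in seconds */

    static void ratelimited_log(const char *msg)
    {
        static time_t window_start;
        static unsigned int count;
        time_t now = time(NULL);

        if (now - window_start >= LOG_WINDOW_S) {
            window_start = now;
            count = 0;
        }
        if (count++ < LOG_BURST)
            fprintf(stderr, "xenoprof: %s\n", msg);
        /* else: dropped, so a guest cannot flood the hypervisor console */
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            ratelimited_log("invalid operation");
        return 0;
    }
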
Index: head/emulators/xen/Makefile
===================================================================
--- head/emulators/xen/Makefile
+++ head/emulators/xen/Makefile
@@ -2,6 +2,7 @@
PORTNAME= xen
PORTVERSION= 4.5.1
+PORTREVISION= 1
CATEGORIES= emulators
MAINTAINER= bapt@FreeBSD.org
Index: head/sysutils/xen-tools/Makefile
===================================================================
--- head/sysutils/xen-tools/Makefile
+++ head/sysutils/xen-tools/Makefile
@@ -2,7 +2,7 @@
PORTNAME= xen
PORTVERSION= 4.5.1
-PORTREVISION= 1
+PORTREVISION= 2
CATEGORIES= sysutils emulators
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${PORTVERSION}/ \
http://code.coreboot.org/p/seabios/downloads/get/:seabios
@@ -50,6 +50,7 @@
EXTRA_PATCHES= ${FILESDIR}/xsa137.patch:-p1 \
${FILESDIR}/xsa142-4.5.patch:-p1 \
+ ${FILESDIR}/xsa153-libxl.patch:-p1 \
${FILESDIR}/0002-libxc-fix-xc_dom_load_elf_symtab.patch:-p1
CONFIGURE_ARGS+= --with-extra-qemuu-configure-args="${QEMU_ARGS}"
Index: head/sysutils/xen-tools/files/xsa153-libxl.patch
===================================================================
--- head/sysutils/xen-tools/files/xsa153-libxl.patch
+++ head/sysutils/xen-tools/files/xsa153-libxl.patch
@@ -0,0 +1,86 @@
+From 27593ec62bdad8621df910931349d964a6dbaa8c Mon Sep 17 00:00:00 2001
+From: Ian Jackson <ian.jackson@eu.citrix.com>
+Date: Wed, 21 Oct 2015 16:18:30 +0100
+Subject: [PATCH XSA-153 v3] libxl: adjust PoD target by memory fudge, too
+
+PoD guests need to balloon at least as far as required by PoD, or risk
+crashing. Currently they don't necessarily know what the right value
+is, because our memory accounting is (at the very least) confusing.
+
+Apply the memory limit fudge factor to the in-hypervisor PoD memory
+target, too. This will increase the size of the guest's PoD cache by
+the fudge factor LIBXL_MAXMEM_CONSTANT (currently 1Mby). This ensures
+that even with a slightly-off balloon driver, the guest will be
+stable even under memory pressure.
+
+There are two call sites of xc_domain_set_pod_target that need fixing:
+
+The one in libxl_set_memory_target is straightforward.
+
+The one in xc_hvm_build_x86.c:setup_guest is more awkward. Simply
+setting the PoD target differently does not work because the various
+amounts of memory during domain construction no longer match up.
+Instead, we adjust the guest memory target in xenstore (but only for
+PoD guests).
+
+This introduces a 1Mby discrepancy between the balloon target of a PoD
+guest at boot, and the target set by an apparently-equivalent `xl
+mem-set' (or similar) later. This approach is low-risk for a security
+fix but we need to fix this up properly in xen.git#staging and
+probably also in stable trees.
+
+This is XSA-153.
+
+Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+---
+ tools/libxl/libxl.c | 2 +-
+ tools/libxl/libxl_dom.c | 9 ++++++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
+index d38d0c7..1366177 100644
+--- a/tools/libxl/libxl.c
++++ b/tools/libxl/libxl.c
+@@ -4815,7 +4815,7 @@ retry_transaction:
+ }
+
+ rc = xc_domain_set_pod_target(ctx->xch, domid,
+- new_target_memkb / 4, NULL, NULL, NULL);
++ (new_target_memkb + LIBXL_MAXMEM_CONSTANT) / 4, NULL, NULL, NULL);
+ if (rc != 0) {
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
+ "xc_domain_set_pod_target domid=%d, memkb=%d "
+diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
+index b514377..8019f4e 100644
+--- a/tools/libxl/libxl_dom.c
++++ b/tools/libxl/libxl_dom.c
+@@ -486,6 +486,7 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
+ xs_transaction_t t;
+ char **ents;
+ int i, rc;
++ int64_t mem_target_fudge;
+
+ if (info->num_vnuma_nodes && !info->num_vcpu_soft_affinity) {
+ rc = set_vnuma_affinity(gc, domid, info);
+@@ -518,11 +519,17 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
+ }
+ }
+
++ mem_target_fudge =
++ (info->type == LIBXL_DOMAIN_TYPE_HVM &&
++ info->max_memkb > info->target_memkb)
++ ? LIBXL_MAXMEM_CONSTANT : 0;
++
+ ents = libxl__calloc(gc, 12 + (info->max_vcpus * 2) + 2, sizeof(char *));
+ ents[0] = "memory/static-max";
+ ents[1] = GCSPRINTF("%"PRId64, info->max_memkb);
+ ents[2] = "memory/target";
+- ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb);
++ ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb
++ - mem_target_fudge);
+ ents[4] = "memory/videoram";
+ ents[5] = GCSPRINTF("%"PRId64, info->video_memkb);
+ ents[6] = "domid";
+--
+1.7.10.4
+
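
The numbers can be worked through standalone (the values below are illustrative; LIBXL_MAXMEM_CONSTANT is the 1 MiB fudge the message describes):

    #include <inttypes.h>
    #include <stdio.h>

    #define LIBXL_MAXMEM_CONSTANT 1024   /* kB, i.e. 1 MiB, as in libxl */

    int main(void)
    {
        int64_t target_memkb = 2 * 1024 * 1024;  /* 2 GiB balloon target */
        int64_t video_memkb  = 8 * 1024;         /* 8 MiB video RAM */
        int64_t max_memkb    = 4 * 1024 * 1024;  /* maxmem > target: PoD guest */

        /* xc_domain_set_pod_target() takes 4 kB pages, hence the /4;
         * the fudge raises the hypervisor-side PoD cache by 1 MiB. */
        int64_t pod_target_pages =
            (target_memkb + LIBXL_MAXMEM_CONSTANT) / 4;

        /* ... while the xenstore balloon target drops by the same amount. */
        int64_t fudge = (max_memkb > target_memkb) ? LIBXL_MAXMEM_CONSTANT : 0;
        int64_t xenstore_target = target_memkb - video_memkb - fudge;

        printf("PoD target:    %" PRId64 " pages\n", pod_target_pages);
        printf("memory/target: %" PRId64 " kB\n", xenstore_target);
        return 0;
    }

The libxl.c hunk raises the hypervisor-side PoD target directly, while the libxl_dom.c hunk lowers the xenstore balloon target instead, since changing the PoD target during domain construction would desynchronise the build-time memory accounting.
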