Index: head/emulators/xen/Makefile
===================================================================
--- head/emulators/xen/Makefile	(revision 400565)
+++ head/emulators/xen/Makefile	(revision 400566)
@@ -1,19 +1,20 @@
 # $FreeBSD$
 
 PORTNAME=	xen
 PORTVERSION=	4.5.1
+PORTREVISION=	1
 CATEGORIES=	emulators
 
 MAINTAINER=	bapt@FreeBSD.org
 COMMENT=	Xen Hyvervisor meta port
 
 LICENSE=	GPLv2
 
 ONLY_FOR_ARCHS=	amd64
 
 RUN_DEPENDS=	/boot/xen:${PORTSDIR}/emulators/xen-kernel \
 		xl:${PORTSDIR}/sysutils/xen-tools
 
 USES=	metaport
 
 .include <bsd.port.mk>
Index: head/emulators/xen-kernel/Makefile
===================================================================
--- head/emulators/xen-kernel/Makefile	(revision 400565)
+++ head/emulators/xen-kernel/Makefile	(revision 400566)
@@ -1,54 +1,60 @@
 # $FreeBSD$
 
 PORTNAME=	xen
 PKGNAMESUFFIX=	-kernel
 PORTVERSION=	4.5.1
+PORTREVISION=	1
 CATEGORIES=	emulators
 MASTER_SITES=	http://bits.xensource.com/oss-xen/release/${PORTVERSION}/
 
 MAINTAINER=	bapt@FreeBSD.org
 COMMENT=	Hypervisor using a microkernel design
 
 LICENSE=	GPLv2
 
 ONLY_FOR_ARCHS=	amd64
 
 USES=		cpe gmake python:build
 MAKE_ARGS=	HOSTCC="${CC}" CC="${CC}" PYTHON=${PYTHON_CMD} \
 		NM="${NM}" LD="${LD}"
 USE_GCC=	yes
 NO_MTREE=	yes
 PLIST_FILES=	/boot/xen \
 		/boot/xen.4th
 ALL_TARGET=	build
 STRIP=		#
 WRKSRC_SUBDIR=	xen
 EXTRA_PATCHES=	${FILESDIR}/0001-introduce-a-helper-to-allocate-non-contiguous-memory.patch:-p2 \
 		${FILESDIR}/0002-vmap-avoid-hitting-an-ASSERT-with-vfree-NULL.patch:-p2 \
 		${FILESDIR}/0003-x86-shadow-fix-shadow_track_dirty_vram-to-work-on-hv.patch:-p2 \
 		${FILESDIR}/0004-x86-hap-make-hap_track_dirty_vram-use-non-contiguous.patch:-p2 \
 		${FILESDIR}/0005-x86-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch:-p2 \
 		${FILESDIR}/0006-xen-pvh-enable-mmu_update-hypercall.patch:-p2 \
 		${FILESDIR}/0007-iommu-fix-usage-of-shared-EPT-IOMMU-page-tables-on-P.patch:-p2 \
-		${FILESDIR}/0008-xen-arm-mm-Do-not-dump-the-p2m-when-mapping-a-foreig.patch:-p2
+		${FILESDIR}/0008-xen-arm-mm-Do-not-dump-the-p2m-when-mapping-a-foreig.patch:-p2 \
+		${FILESDIR}/xsa148-4.5.patch:-p2 \
+		${FILESDIR}/xsa149.patch:-p2 \
+		${FILESDIR}/xsa150.patch:-p2 \
+		${FILESDIR}/xsa151.patch:-p2 \
+		${FILESDIR}/xsa152-4.5.patch:-p2
 
 .include <bsd.port.options.mk>
 
 .if ${OPSYS} != FreeBSD
 IGNORE=	Only supported on FreeBSD
 .endif
 
 .if ${OSVERSION} < 1100055
 IGNORE=	Only supported on recent FreeBSD 11
 .endif
 
 do-install:
 	${MKDIR} ${STAGEDIR}/boot
 	${INSTALL_PROGRAM} ${WRKSRC}/xen ${STAGEDIR}/boot
 	${INSTALL_DATA} ${FILESDIR}/xen.4th ${STAGEDIR}/boot
 
 .include <bsd.port.mk>
 
 #Filter out LDFLAGS
 .undef LDFLAGS
 RUN_DEPENDS:=	${RUN_DEPENDS:N*gcc*}
Index: head/emulators/xen-kernel/files/xsa148-4.5.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa148-4.5.patch	(nonexistent)
+++ head/emulators/xen-kernel/files/xsa148-4.5.patch	(revision 400566)
@@ -0,0 +1,39 @@
+x86: guard against undue super page PTE creation
+
+When optional super page support got added (commit bd1cd81d64 "x86: PV
+support for hugepages"), two adjustments were missed: mod_l2_entry()
+needs to consider the PSE and RW bits when deciding whether to use the
+fast path, and the PSE bit must not be removed from L2_DISALLOW_MASK
+unconditionally.
+
+This is XSA-148.
+
+Signed-off-by: Jan Beulich
+Reviewed-by: Tim Deegan
+
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -162,7 +162,10 @@ static void put_superpage(unsigned long
+ static uint32_t base_disallow_mask;
+ /* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
+ #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
+-#define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
++
++#define L2_DISALLOW_MASK (unlikely(opt_allow_superpage) \
++                          ? base_disallow_mask & ~_PAGE_PSE \
++                          : base_disallow_mask)
+ 
+ #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?      \
+                              base_disallow_mask :           \
+@@ -1770,7 +1773,10 @@ static int mod_l2_entry(l2_pgentry_t *pl
+     }
+ 
+     /* Fast path for identical mapping and presence. */
+-    if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
++    if ( !l2e_has_changed(ol2e, nl2e,
++                          unlikely(opt_allow_superpage)
++                          ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
++                          : _PAGE_PRESENT) )
+     {
+         adjust_guest_l2e(nl2e, d);
+         if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )

Property changes on: head/emulators/xen-kernel/files/xsa148-4.5.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Index: head/emulators/xen-kernel/files/xsa149.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa149.patch	(nonexistent)
+++ head/emulators/xen-kernel/files/xsa149.patch	(revision 400566)
@@ -0,0 +1,20 @@
+xen: free domain's vcpu array
+
+This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
+guest").
+
+This is XSA-149.
+
+Signed-off-by: Jan Beulich
+Reviewed-by: Ian Campbell
+
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -841,6 +841,7 @@ static void complete_domain_destroy(stru
+ 
+     xsm_free_security_domain(d);
+     free_cpumask_var(d->domain_dirty_cpumask);
++    xfree(d->vcpu);
+     free_domain_struct(d);
+ 
+     send_global_virq(VIRQ_DOM_EXC);

Property changes on: head/emulators/xen-kernel/files/xsa149.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Index: head/emulators/xen-kernel/files/xsa150.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa150.patch	(nonexistent)
+++ head/emulators/xen-kernel/files/xsa150.patch	(revision 400566)
@@ -0,0 +1,201 @@
+x86/PoD: Eager sweep for zeroed pages
+
+Based on the contents of a guests physical address space,
+p2m_pod_emergency_sweep() could degrade into a linear memcmp() from 0 to
+max_gfn, which runs non-preemptibly.
+
+As p2m_pod_emergency_sweep() runs behind the scenes in a number of contexts,
+making it preemptible is not feasible.
+
+Instead, a different approach is taken. Recently-populated pages are eagerly
+checked for reclaimation, which amortises the p2m_pod_emergency_sweep()
+operation across each p2m_pod_demand_populate() operation.
+
+Note that in the case that a 2M superpage can't be reclaimed as a superpage,
+it is shattered if 4K pages of zeros can be reclaimed. This is unfortunate
+but matches the previous behaviour, and is required to avoid regressions
+(domain crash from PoD exhaustion) with VMs configured close to the limit.
+
+This is CVE-2015-7970 / XSA-150.
+
+Signed-off-by: Andrew Cooper
+Reviewed-by: Jan Beulich
+Reviewed-by: George Dunlap
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -920,28 +920,6 @@ p2m_pod_zero_check(struct p2m_domain *p2
+ }
+ 
+ #define POD_SWEEP_LIMIT 1024
+-
+-/* When populating a new superpage, look at recently populated superpages
+- * hoping that they've been zeroed. This will snap up zeroed pages as soon as
+- * the guest OS is done with them. */
+-static void
+-p2m_pod_check_last_super(struct p2m_domain *p2m, unsigned long gfn_aligned)
+-{
+-    unsigned long check_gfn;
+-
+-    ASSERT(p2m->pod.last_populated_index < POD_HISTORY_MAX);
+-
+-    check_gfn = p2m->pod.last_populated[p2m->pod.last_populated_index];
+-
+-    p2m->pod.last_populated[p2m->pod.last_populated_index] = gfn_aligned;
+-
+-    p2m->pod.last_populated_index =
+-        ( p2m->pod.last_populated_index + 1 ) % POD_HISTORY_MAX;
+-
+-    p2m_pod_zero_check_superpage(p2m, check_gfn);
+-}
+-
+-
+ #define POD_SWEEP_STRIDE 16
+ static void
+ p2m_pod_emergency_sweep(struct p2m_domain *p2m)
+@@ -982,7 +960,7 @@ p2m_pod_emergency_sweep(struct p2m_domai
+          * NB that this is a zero-sum game; we're increasing our cache size
+          * by re-increasing our 'debt'. Since we hold the pod lock,
+          * (entry_count - count) must remain the same. */
+-        if ( p2m->pod.count > 0 && i < limit )
++        if ( i < limit && (p2m->pod.count > 0 || hypercall_preempt_check()) )
+             break;
+     }
+ 
+@@ -994,6 +972,58 @@ p2m_pod_emergency_sweep(struct p2m_domai
+ 
+ }
+ 
++static void pod_eager_reclaim(struct p2m_domain *p2m)
++{
++    struct pod_mrp_list *mrp = &p2m->pod.mrp;
++    unsigned int i = 0;
++
++    /*
++     * Always check one page for reclaimation.
++     *
++     * If the PoD pool is empty, keep checking some space is found, or all
++     * entries have been exhaused.
++     */
++    do
++    {
++        unsigned int idx = (mrp->idx + i++) % ARRAY_SIZE(mrp->list);
++        unsigned long gfn = mrp->list[idx];
++
++        if ( gfn != INVALID_GFN )
++        {
++            if ( gfn & POD_LAST_SUPERPAGE )
++            {
++                gfn &= ~POD_LAST_SUPERPAGE;
++
++                if ( p2m_pod_zero_check_superpage(p2m, gfn) == 0 )
++                {
++                    unsigned int x;
++
++                    for ( x = 0; x < SUPERPAGE_PAGES; ++x, ++gfn )
++                        p2m_pod_zero_check(p2m, &gfn, 1);
++                }
++            }
++            else
++                p2m_pod_zero_check(p2m, &gfn, 1);
++
++            mrp->list[idx] = INVALID_GFN;
++        }
++
++    } while ( (p2m->pod.count == 0) && (i < ARRAY_SIZE(mrp->list)) );
++}
++
++static void pod_eager_record(struct p2m_domain *p2m,
++                             unsigned long gfn, unsigned int order)
++{
++    struct pod_mrp_list *mrp = &p2m->pod.mrp;
++
++    ASSERT(mrp->list[mrp->idx] == INVALID_GFN);
++    ASSERT(gfn != INVALID_GFN);
++
++    mrp->list[mrp->idx++] =
++        gfn | (order == PAGE_ORDER_2M ? POD_LAST_SUPERPAGE : 0);
++    mrp->idx %= ARRAY_SIZE(mrp->list);
++}
++
+ int
+ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
+                         unsigned int order,
+@@ -1034,6 +1064,8 @@ p2m_pod_demand_populate(struct p2m_domai
+         return 0;
+     }
+ 
++    pod_eager_reclaim(p2m);
++
+     /* Only sweep if we're actually out of memory. Doing anything else
+      * causes unnecessary time and fragmentation of superpages in the p2m. */
+     if ( p2m->pod.count == 0 )
+@@ -1070,6 +1102,8 @@ p2m_pod_demand_populate(struct p2m_domai
+     p2m->pod.entry_count -= (1 << order);
+     BUG_ON(p2m->pod.entry_count < 0);
+ 
++    pod_eager_record(p2m, gfn_aligned, order);
++
+     if ( tb_init_done )
+     {
+         struct {
+@@ -1085,12 +1119,6 @@ p2m_pod_demand_populate(struct p2m_domai
+         __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
+     }
+ 
+-    /* Check the last guest demand-populate */
+-    if ( p2m->pod.entry_count > p2m->pod.count
+-         && (order == PAGE_ORDER_2M)
+-         && (q & P2M_ALLOC) )
+-        p2m_pod_check_last_super(p2m, gfn_aligned);
+-
+     pod_unlock(p2m);
+     return 0;
+ out_of_memory:
+--- a/xen/arch/x86/mm/p2m.c
++++ b/xen/arch/x86/mm/p2m.c
+@@ -58,6 +58,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
+ /* Init the datastructures for later use by the p2m code */
+ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
+ {
++    unsigned int i;
+     int ret = 0;
+ 
+     mm_rwlock_init(&p2m->lock);
+@@ -73,6 +74,9 @@ static int p2m_initialise(struct domain
+ 
+     p2m->np2m_base = P2M_BASE_EADDR;
+ 
++    for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
++        p2m->pod.mrp.list[i] = INVALID_GFN;
++
+     if ( hap_enabled(d) && cpu_has_vmx )
+         ret = ept_p2m_init(p2m);
+     else
+--- a/xen/include/asm-x86/p2m.h
++++ b/xen/include/asm-x86/p2m.h
+@@ -292,10 +292,20 @@ struct p2m_domain {
+                           entry_count;  /* # of pages in p2m marked pod */
+         unsigned long     reclaim_single; /* Last gpfn of a scan */
+         unsigned long     max_guest;    /* gpfn of max guest demand-populate */
+-#define POD_HISTORY_MAX 128
+-        /* gpfn of last guest superpage demand-populated */
+-        unsigned long     last_populated[POD_HISTORY_MAX];
+-        unsigned int      last_populated_index;
++
++        /*
++         * Tracking of the most recently populated PoD pages, for eager
++         * reclamation.
++         */
++        struct pod_mrp_list {
++#define NR_POD_MRP_ENTRIES 32
++
++/* Encode ORDER_2M superpage in top bit of GFN */
++#define POD_LAST_SUPERPAGE (INVALID_GFN & ~(INVALID_GFN >> 1))
++
++            unsigned long list[NR_POD_MRP_ENTRIES];
++            unsigned int idx;
++        } mrp;
+         mm_lock_t        lock;         /* Locking of private pod structs, *
+                                         * not relying on the p2m lock. */
+     } pod;

Property changes on: head/emulators/xen-kernel/files/xsa150.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Index: head/emulators/xen-kernel/files/xsa151.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa151.patch	(nonexistent)
+++ head/emulators/xen-kernel/files/xsa151.patch	(revision 400566)
@@ -0,0 +1,28 @@
+xenoprof: free domain's vcpu array
+
+This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
+guest").
+
+This is XSA-151.
+
+Signed-off-by: Jan Beulich
+Reviewed-by: Ian Campbell
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -239,6 +239,7 @@ static int alloc_xenoprof_struct(
+     d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
+     if ( d->xenoprof->rawbuf == NULL )
+     {
++        xfree(d->xenoprof->vcpu);
+         xfree(d->xenoprof);
+         d->xenoprof = NULL;
+         return -ENOMEM;
+@@ -286,6 +287,7 @@ void free_xenoprof_pages(struct domain *
+         free_xenheap_pages(x->rawbuf, order);
+     }
+ 
++    xfree(x->vcpu);
+     xfree(x);
+     d->xenoprof = NULL;
+ }

Property changes on: head/emulators/xen-kernel/files/xsa151.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Index: head/emulators/xen-kernel/files/xsa152-4.5.patch
===================================================================
--- head/emulators/xen-kernel/files/xsa152-4.5.patch	(nonexistent)
+++ head/emulators/xen-kernel/files/xsa152-4.5.patch	(revision 400566)
@@ -0,0 +1,41 @@
+x86: rate-limit logging in do_xen{oprof,pmu}_op()
+
+Some of the sub-ops are acessible to all guests, and hence should be
+rate-limited. In the xenoprof case, just like for XSA-146, include them
+only in debug builds. Since the vPMU code is rather new, allow them to
+be always present, but downgrade them to (rate limited) guest messages.
+
+This is XSA-152.
+
+Signed-off-by: Jan Beulich
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -676,15 +676,13 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
+ 
+     if ( (op < 0) || (op > XENOPROF_last_op) )
+     {
+-        printk("xenoprof: invalid operation %d for domain %d\n",
+-               op, current->domain->domain_id);
++        gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op);
+         return -EINVAL;
+     }
+ 
+     if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
+     {
+-        printk("xenoprof: dom %d denied privileged operation %d\n",
+-               current->domain->domain_id, op);
++        gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op);
+         return -EPERM;
+     }
+ 
+@@ -907,8 +905,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
+     spin_unlock(&xenoprof_lock);
+ 
+     if ( ret < 0 )
+-        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
+-               op, current->domain->domain_id, ret);
++        gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret);
+ 
+     return ret;
+ }

Property changes on: head/emulators/xen-kernel/files/xsa152-4.5.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Index: head/sysutils/xen-tools/Makefile
===================================================================
--- head/sysutils/xen-tools/Makefile	(revision 400565)
+++ head/sysutils/xen-tools/Makefile	(revision 400566)
@@ -1,88 +1,89 @@
 # $FreeBSD$
 
 PORTNAME=	xen
 PORTVERSION=	4.5.1
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	sysutils emulators
 MASTER_SITES=	http://bits.xensource.com/oss-xen/release/${PORTVERSION}/ \
 		http://code.coreboot.org/p/seabios/downloads/get/:seabios
 PKGNAMESUFFIX=	-tools
 
 MAINTAINER=	bapt@FreeBSD.org
 COMMENT=	Xen management tool, based on LibXenlight
 
 LICENSE=	GPLv2 LGPL3
 LICENSE_COMB=	multi
 
 LIB_DEPENDS=	libyajl.so:${PORTSDIR}/devel/yajl \
 		liblzo2.so:${PORTSDIR}/archivers/lzo2 \
 		libpixman-1.so:${PORTSDIR}/x11/pixman
 BUILD_DEPENDS=	dev86>0:${PORTSDIR}/devel/dev86
 
 OPTIONS_DEFINE=	DOCS
 
 ONLY_FOR_ARCHS=	amd64
 ONLY_FOR_ARCHS_REASON=	"not yet ported to anything other than amd64"
 
 SEABIOSVERSION=	1.8.1
 DISTFILES+=	${DISTNAME}.tar.gz \
 		seabios-${SEABIOSVERSION}.tar.gz:seabios
 WRKSRC=		${WRKDIR}/xen-${PORTVERSION}
 
 USES=		cpe gmake libtool perl5 pkgconfig python shebangfix
 USE_GNOME=	glib20
 USE_LDCONFIG=	yes
 GNU_CONFIGURE=	yes
 CONFIGURE_ENV=	HOSTCC="${CC}" CC="${CC}" \
 		ac_cv_path_BASH=${TRUE} \
 		ac_cv_path_XGETTEXT=${TRUE}
 MAKE_ARGS=	HOSTCC="${CC}" CC="${CC}" GCC="${GCC}" cc="${GCC}"
 QEMU_ARGS=	--disable-gtk \
 		--disable-smartcard-nss \
 		--disable-sdl \
 		--disable-vte \
 		--disable-glx \
 		--disable-curses \
 		--disable-tools \
 		--disable-curl \
 		--cxx=c++
 EXTRA_PATCHES=	${FILESDIR}/xsa137.patch:-p1 \
 		${FILESDIR}/xsa142-4.5.patch:-p1 \
+		${FILESDIR}/xsa153-libxl.patch:-p1 \
 		${FILESDIR}/0002-libxc-fix-xc_dom_load_elf_symtab.patch:-p1
 
 CONFIGURE_ARGS+=	--with-extra-qemuu-configure-args="${QEMU_ARGS}"
 
 SHEBANG_FILES=	tools/misc/xencov_split \
 		tools/misc/xen-ringwatch
 USE_GCC=	yes
 
 ALL_TARGET=	tools docs
 INSTALL_TARGET=	install-tools install-docs
 
 .include <bsd.port.options.mk>
 
 .if ${OPSYS} != FreeBSD
 IGNORE=	only supported on FreeBSD
 .endif
 
 post-extract:
 	${MV} ${WRKDIR}/seabios-${SEABIOSVERSION} ${WRKSRC}/tools/firmware/seabios-dir
 
 post-patch:
 	@${REINPLACE_CMD} "s,x86_64,amd64,g" ${WRKSRC}/tools/configure
 	@${REINPLACE_CMD} -e "s,/var/lib,/var/db,g" \
 		${WRKSRC}/tools/Makefile \
 		${WRKSRC}/tools/libxc/include/xenguest.h \
 		${WRKSRC}/tools/libxl/libxl_dom.c \
 		${WRKSRC}/tools/libxl/libxl_dm.c \
 		${WRKSRC}/tools/qemu-xen-traditional/i386-dm/helper2.c \
 		${WRKSRC}/docs/man/*
 	@for p in ${FILESDIR}/*qemuu*.patch; do \
 		${ECHO_CMD} "====> Applying $${p##*/}" ; \
 		${PATCH} -s -p1 -i $${p} -d ${WRKSRC}/tools/qemu-xen ; \
 	done
 
 post-install:
 	${MKDIR} ${STAGEDIR}/var/run/xen
 
 .include <bsd.port.mk>
Index: head/sysutils/xen-tools/files/xsa153-libxl.patch
===================================================================
--- head/sysutils/xen-tools/files/xsa153-libxl.patch	(nonexistent)
+++ head/sysutils/xen-tools/files/xsa153-libxl.patch	(revision 400566)
@@ -0,0 +1,86 @@
+From 27593ec62bdad8621df910931349d964a6dbaa8c Mon Sep 17 00:00:00 2001
+From: Ian Jackson
+Date: Wed, 21 Oct 2015 16:18:30 +0100
+Subject: [PATCH XSA-153 v3] libxl: adjust PoD target by memory fudge, too
+
+PoD guests need to balloon at least as far as required by PoD, or risk
+crashing. Currently they don't necessarily know what the right value
+is, because our memory accounting is (at the very least) confusing.
+
+Apply the memory limit fudge factor to the in-hypervisor PoD memory
+target, too. This will increase the size of the guest's PoD cache by
+the fudge factor LIBXL_MAXMEM_CONSTANT (currently 1Mby). This ensures
+that even with a slightly-off balloon driver, the guest will be
+stable even under memory pressure.
+
+There are two call sites of xc_domain_set_pod_target that need fixing:
+
+The one in libxl_set_memory_target is straightforward.
+
+The one in xc_hvm_build_x86.c:setup_guest is more awkward. Simply
+setting the PoD target differently does not work because the various
+amounts of memory during domain construction no longer match up.
+Instead, we adjust the guest memory target in xenstore (but only for
+PoD guests).
+
+This introduces a 1Mby discrepancy between the balloon target of a PoD
+guest at boot, and the target set by an apparently-equivalent `xl
+mem-set' (or similar) later. This approach is low-risk for a security
+fix but we need to fix this up properly in xen.git#staging and
+probably also in stable trees.
+
+This is XSA-153.
+
+Signed-off-by: Ian Jackson
+---
+ tools/libxl/libxl.c     |    2 +-
+ tools/libxl/libxl_dom.c |    9 ++++++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
+index d38d0c7..1366177 100644
+--- a/tools/libxl/libxl.c
++++ b/tools/libxl/libxl.c
+@@ -4815,7 +4815,7 @@ retry_transaction:
+     }
+ 
+     rc = xc_domain_set_pod_target(ctx->xch, domid,
+-                                  new_target_memkb / 4, NULL, NULL, NULL);
++                                  (new_target_memkb + LIBXL_MAXMEM_CONSTANT) / 4, NULL, NULL, NULL);
+     if (rc != 0) {
+         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
+                          "xc_domain_set_pod_target domid=%d, memkb=%d "
+diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
+index b514377..8019f4e 100644
+--- a/tools/libxl/libxl_dom.c
++++ b/tools/libxl/libxl_dom.c
+@@ -486,6 +486,7 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
+     xs_transaction_t t;
+     char **ents;
+     int i, rc;
++    int64_t mem_target_fudge;
+ 
+     if (info->num_vnuma_nodes && !info->num_vcpu_soft_affinity) {
+         rc = set_vnuma_affinity(gc, domid, info);
+@@ -518,11 +519,17 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
+         }
+     }
+ 
++    mem_target_fudge =
++        (info->type == LIBXL_DOMAIN_TYPE_HVM &&
++         info->max_memkb > info->target_memkb)
++        ? LIBXL_MAXMEM_CONSTANT : 0;
++
+     ents = libxl__calloc(gc, 12 + (info->max_vcpus * 2) + 2, sizeof(char *));
+     ents[0] = "memory/static-max";
+     ents[1] = GCSPRINTF("%"PRId64, info->max_memkb);
+     ents[2] = "memory/target";
+-    ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb);
++    ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb
++                        - mem_target_fudge);
+     ents[4] = "memory/videoram";
+     ents[5] = GCSPRINTF("%"PRId64, info->video_memkb);
+     ents[6] = "domid";
+-- 
+1.7.10.4
+

Property changes on: head/sysutils/xen-tools/files/xsa153-libxl.patch
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
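For anyone reading the XSA-150 hunks above rather than applying them: the new pod_eager_record()/pod_eager_reclaim() pair amounts to a small fixed-size ring of recently populated guest frame numbers that is drained opportunistically on the next demand-populate. The stand-alone C sketch below illustrates only that record/drain pattern; every identifier in it (mrp_list, mrp_record, mrp_reclaim, demo_zero_check) is invented for the illustration and is not a Xen symbol, and the real logic, including the superpage shattering, is the code added by xsa150.patch itself.

/*
 * Illustrative sketch only: a fixed-size "most recently populated" ring,
 * mirroring the record/drain pattern introduced by xsa150.patch above.
 * All identifiers here are made up for the example and are not Xen's.
 */

#define NR_MRP_ENTRIES 32
#define INVALID_GFN    (~0UL)

struct mrp_list {
    unsigned long list[NR_MRP_ENTRIES];
    unsigned int idx;
};

/* Start with every slot empty. */
static void mrp_init(struct mrp_list *mrp)
{
    unsigned int i;

    for ( i = 0; i < NR_MRP_ENTRIES; i++ )
        mrp->list[i] = INVALID_GFN;
    mrp->idx = 0;
}

/* Remember a newly populated gfn, overwriting the oldest slot. */
static void mrp_record(struct mrp_list *mrp, unsigned long gfn)
{
    mrp->list[mrp->idx++] = gfn;
    mrp->idx %= NR_MRP_ENTRIES;
}

/*
 * Always examine at least one remembered gfn; keep going until the
 * supplied check reclaims something or every slot has been visited.
 */
static void mrp_reclaim(struct mrp_list *mrp, int (*zero_check)(unsigned long))
{
    unsigned int i = 0;
    int reclaimed = 0;

    do {
        unsigned int slot = (mrp->idx + i++) % NR_MRP_ENTRIES;
        unsigned long gfn = mrp->list[slot];

        if ( gfn != INVALID_GFN )
        {
            reclaimed += zero_check(gfn);
            mrp->list[slot] = INVALID_GFN;
        }
    } while ( !reclaimed && i < NR_MRP_ENTRIES );
}

/* Toy check: pretend even gfns were found to be zeroed and reclaimable. */
static int demo_zero_check(unsigned long gfn)
{
    return (gfn & 1) == 0;
}

int main(void)
{
    struct mrp_list mrp;

    mrp_init(&mrp);
    mrp_record(&mrp, 0x1000);   /* populated pages get recorded ...        */
    mrp_record(&mrp, 0x1001);
    mrp_reclaim(&mrp, demo_zero_check);  /* ... and checked on the next fault */
    return 0;
}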