Differential D16416 Diff 46059 head/emulators/xen-kernel47/files/0002-xen-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch
Changeset View
Changeset View
Standalone View
Standalone View
head/emulators/xen-kernel47/files/0002-xen-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch
Property | Old Value | New Value |
---|---|---|
fbsd:nokeywords | null | yes \ No newline at end of property |
From e253a2e2fb45197273cee7a7fa2b77f7a87cb67f Mon Sep 17 00:00:00 2001 | |||||
From: Roger Pau Monne <roger.pau@citrix.com> | |||||
Date: Tue, 31 May 2016 16:07:26 +0200 | |||||
Subject: [PATCH 2/2] xen: rework paging_log_dirty_op to work with hvm guests | |||||
MIME-Version: 1.0 | |||||
Content-Type: text/plain; charset=UTF-8 | |||||
Content-Transfer-Encoding: 8bit | |||||
When the caller of paging_log_dirty_op is an HVM guest, Xen would choke when
trying to copy the dirty bitmap to the guest because the paging lock is
already held.
Fix this by independently mapping each page of the guest bitmap as needed,
without holding the paging lock.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com> | |||||
Cc: Tim Deegan <tim@xen.org> | |||||
Cc: Jan Beulich <jbeulich@suse.com> | |||||
Cc: Andrew Cooper <andrew.cooper3@citrix.com> | |||||
--- | |||||
Changes since v6: | |||||
- Move the again label to the start of the function. | |||||
- Set all the preempt fields if we need to map a new page (just like on the | |||||
preempt case). | |||||
Changes since v4: | |||||
- Indent again label. | |||||
- Replace bogus paddr_t cast with proper type. | |||||
- Update preempt.log_dirty before dropping the paging lock. | |||||
Changes since v3: | |||||
- Drop last parameter from map_dirty_bitmap. | |||||
- Drop pointless initializers in paging_log_dirty_op. | |||||
- Add a new field to paging_domain in order to copy i2 position. | |||||
- Move the again case up to make sure we don't hold cached values of the | |||||
contents of log_dirty. | |||||
- Replace the BUG_ON in paging_log_dirty_op with an ASSERT. | |||||
Changes since v2: | |||||
- Add checks for p2m_is_ram and p2m_is_discard_write when mapping a guest | |||||
page. | |||||
- Remove error checking from memset/memcpy, they unconditionally return | |||||
dst. | |||||
--- | |||||
xen/arch/x86/mm/paging.c | 97 +++++++++++++++++++++++++++++++++++++++----- | |||||
xen/include/asm-x86/domain.h | 1 + | |||||
2 files changed, 87 insertions(+), 11 deletions(-) | |||||
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c | |||||
index ed94ff7..595c9d6 100644 | |||||
--- a/xen/arch/x86/mm/paging.c | |||||
+++ b/xen/arch/x86/mm/paging.c | |||||
@@ -406,6 +406,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) | |||||
return rv; | |||||
} | |||||
+static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap, | |||||
+ unsigned long pages, | |||||
+ struct page_info **page) | |||||
+{ | |||||
+ uint32_t pfec = PFEC_page_present | PFEC_write_access; | |||||
+ unsigned long gfn; | |||||
+ p2m_type_t p2mt; | |||||
+ | |||||
+ gfn = paging_gva_to_gfn(current, | |||||
+ (unsigned long)(dirty_bitmap.p + (pages >> 3)), | |||||
+ &pfec); | |||||
+ if ( gfn == INVALID_GFN ) | |||||
+ return NULL; | |||||
+ | |||||
+ *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE); | |||||
+ | |||||
+ if ( !p2m_is_ram(p2mt) ) | |||||
+ { | |||||
+ put_page(*page); | |||||
+ return NULL; | |||||
+ } | |||||
+ if ( p2m_is_paging(p2mt) ) | |||||
+ { | |||||
+ put_page(*page); | |||||
+ p2m_mem_paging_populate(current->domain, gfn); | |||||
+ return NULL; | |||||
+ } | |||||
+ if ( p2m_is_shared(p2mt) || p2m_is_discard_write(p2mt) ) | |||||
+ { | |||||
+ put_page(*page); | |||||
+ return NULL; | |||||
+ } | |||||
+ | |||||
+ return __map_domain_page(*page); | |||||
+} | |||||
+ | |||||
+static inline void unmap_dirty_bitmap(void *addr, struct page_info *page) | |||||
+{ | |||||
+ if ( addr != NULL ) | |||||
+ { | |||||
+ unmap_domain_page(addr); | |||||
+ put_page(page); | |||||
+ } | |||||
+} | |||||
+ | |||||
/* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN, | |||||
* clear the bitmap and stats as well. */ | |||||
@@ -418,7 +463,11 @@ static int paging_log_dirty_op(struct domain *d, | |||||
mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL; | |||||
unsigned long *l1 = NULL; | |||||
int i4, i3, i2; | |||||
+ uint8_t *dirty_bitmap; | |||||
+ struct page_info *page; | |||||
+ unsigned long index_mapped; | |||||
+ again: | |||||
if ( !resuming ) | |||||
{ | |||||
/* | |||||
@@ -439,6 +488,14 @@ static int paging_log_dirty_op(struct domain *d, | |||||
p2m_flush_hardware_cached_dirty(d); | |||||
} | |||||
+ index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0; | |||||
+ dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page); | |||||
+ if ( dirty_bitmap == NULL ) | |||||
+ { | |||||
+ domain_unpause(d); | |||||
+ return -EFAULT; | |||||
+ } | |||||
+ | |||||
paging_lock(d); | |||||
if ( !d->arch.paging.preempt.dom ) | |||||
@@ -478,18 +535,18 @@ static int paging_log_dirty_op(struct domain *d, | |||||
l4 = paging_map_log_dirty_bitmap(d); | |||||
i4 = d->arch.paging.preempt.log_dirty.i4; | |||||
i3 = d->arch.paging.preempt.log_dirty.i3; | |||||
+ i2 = d->arch.paging.preempt.log_dirty.i2; | |||||
pages = d->arch.paging.preempt.log_dirty.done; | |||||
for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 ) | |||||
{ | |||||
l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL; | |||||
- for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ ) | |||||
+ for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); | |||||
+ i3++, i2 = 0 ) | |||||
{ | |||||
l2 = ((l3 && mfn_valid(l3[i3])) ? | |||||
map_domain_page(l3[i3]) : NULL); | |||||
- for ( i2 = 0; | |||||
- (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); | |||||
- i2++ ) | |||||
+ for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ ) | |||||
{ | |||||
unsigned int bytes = PAGE_SIZE; | |||||
l1 = ((l2 && mfn_valid(l2[i2])) ? | |||||
@@ -498,15 +555,28 @@ static int paging_log_dirty_op(struct domain *d, | |||||
bytes = (unsigned int)((sc->pages - pages + 7) >> 3); | |||||
if ( likely(peek) ) | |||||
{ | |||||
- if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap, | |||||
- pages >> 3, (uint8_t *)l1, | |||||
- bytes) | |||||
- : clear_guest_offset(sc->dirty_bitmap, | |||||
- pages >> 3, bytes)) != 0 ) | |||||
+ if ( pages >> (3 + PAGE_SHIFT) != | |||||
+ index_mapped >> (3 + PAGE_SHIFT) ) | |||||
{ | |||||
- rv = -EFAULT; | |||||
- goto out; | |||||
+ /* We need to map next page */ | |||||
+ d->arch.paging.preempt.log_dirty.i4 = i4; | |||||
+ d->arch.paging.preempt.log_dirty.i3 = i3; | |||||
+ d->arch.paging.preempt.log_dirty.i2 = i2; | |||||
+ d->arch.paging.preempt.log_dirty.done = pages; | |||||
+ d->arch.paging.preempt.dom = current->domain; | |||||
+ d->arch.paging.preempt.op = sc->op; | |||||
+ resuming = 1; | |||||
+ paging_unlock(d); | |||||
+ unmap_dirty_bitmap(dirty_bitmap, page); | |||||
+ goto again; | |||||
} | |||||
+ ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE); | |||||
+ if ( l1 ) | |||||
+ memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1, | |||||
+ bytes); | |||||
+ else | |||||
+ memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0, | |||||
+ bytes); | |||||
} | |||||
pages += bytes << 3; | |||||
if ( l1 ) | |||||
@@ -524,6 +594,7 @@ static int paging_log_dirty_op(struct domain *d, | |||||
{ | |||||
d->arch.paging.preempt.log_dirty.i4 = i4; | |||||
d->arch.paging.preempt.log_dirty.i3 = i3 + 1; | |||||
+ d->arch.paging.preempt.log_dirty.i2 = 0; | |||||
rv = -ERESTART; | |||||
break; | |||||
} | |||||
@@ -536,6 +607,7 @@ static int paging_log_dirty_op(struct domain *d, | |||||
{ | |||||
d->arch.paging.preempt.log_dirty.i4 = i4 + 1; | |||||
d->arch.paging.preempt.log_dirty.i3 = 0; | |||||
+ d->arch.paging.preempt.log_dirty.i2 = 0; | |||||
rv = -ERESTART; | |||||
} | |||||
if ( rv ) | |||||
@@ -565,6 +637,7 @@ static int paging_log_dirty_op(struct domain *d, | |||||
if ( rv ) | |||||
{ | |||||
/* Never leave the domain paused on real errors. */ | |||||
+ unmap_dirty_bitmap(dirty_bitmap, page); | |||||
ASSERT(rv == -ERESTART); | |||||
return rv; | |||||
} | |||||
@@ -577,12 +650,14 @@ static int paging_log_dirty_op(struct domain *d, | |||||
* paging modes (shadow or hap). Safe because the domain is paused. */ | |||||
d->arch.paging.log_dirty.clean_dirty_bitmap(d); | |||||
} | |||||
+ unmap_dirty_bitmap(dirty_bitmap, page); | |||||
domain_unpause(d); | |||||
return rv; | |||||
out: | |||||
d->arch.paging.preempt.dom = NULL; | |||||
paging_unlock(d); | |||||
+ unmap_dirty_bitmap(dirty_bitmap, page); | |||||
domain_unpause(d); | |||||
if ( l1 ) | |||||
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h | |||||
index 165e533..0dc1ed8 100644 | |||||
--- a/xen/include/asm-x86/domain.h | |||||
+++ b/xen/include/asm-x86/domain.h | |||||
@@ -206,6 +206,7 @@ struct paging_domain { | |||||
unsigned long done:PADDR_BITS - PAGE_SHIFT; | |||||
unsigned long i4:PAGETABLE_ORDER; | |||||
unsigned long i3:PAGETABLE_ORDER; | |||||
+ unsigned long i2:PAGETABLE_ORDER; | |||||
} log_dirty; | |||||
}; | |||||
} preempt; | |||||
-- | |||||
2.7.4 (Apple Git-66) | |||||