D20585.diff

Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -2267,6 +2267,8 @@
int bit, field;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((va & L2_OFFSET) == 0,
+ ("pmap_pv_demote_l2: va is not 2mpage aligned"));
KASSERT((pa & L2_OFFSET) == 0,
("pmap_pv_demote_l2: pa is not 2mpage aligned"));
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
@@ -2277,7 +2279,6 @@
* must not be released until the last pv entry is reinstantiated.
*/
pvh = pa_to_pvh(pa);
- va = va & ~L2_OFFSET;
pv = pmap_pvh_remove(pvh, pmap, va);
KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
m = PHYS_TO_VM_PAGE(pa);
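
The two hunks above replace a silent "va = va & ~L2_OFFSET" inside pmap_pv_demote_l2() with a KASSERT that callers already pass a 2MB-aligned virtual address. A self-contained sketch of the arithmetic, with the L2_* constants written out by hand under the assumption of a 4KB translation granule (the kernel defines the real ones in pte.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L2_SHIFT        21                      /* assumed: 4KB granule */
#define L2_SIZE         (UINT64_C(1) << L2_SHIFT)
#define L2_OFFSET       (L2_SIZE - 1)

int
main(void)
{
        uint64_t va = UINT64_C(0x40212345);

        /* What the removed line did: silently truncate to the block base. */
        printf("masked:  %#jx\n", (uintmax_t)(va & ~L2_OFFSET));

        /* What the new KASSERT demands of callers instead. */
        va &= ~L2_OFFSET;
        assert((va & L2_OFFSET) == 0);
        printf("aligned: %#jx\n", (uintmax_t)va);
        return (0);
}
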
@@ -2433,7 +2434,13 @@
old_l2 = pmap_load_clear(l2);
KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
- pmap_invalidate_range(pmap, sva, sva + L2_SIZE);
+
+ /*
+ * Since a promotion must break the 4KB page mappings before making
+ * the 2MB page mapping, a pmap_invalidate_page() suffices.
+ */
+ pmap_invalidate_page(pmap, sva);
+
if (old_l2 & ATTR_SW_WIRED)
pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
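
For scale: assuming pmap_invalidate_range() steps through the range page by page (its usual shape on arm64), removing a 2MB block that way costs 512 individual TLB invalidations. The comment added above makes the key observation: a promotion breaks the 4KB mappings first, so the TLB can hold at most the single 2MB entry, and one pmap_invalidate_page() suffices. A toy calculation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       UINT64_C(4096)
#define L2_SIZE         (UINT64_C(2) * 1024 * 1024)

int
main(void)
{
        /* Page-by-page invalidation of a 2MB block vs. a single shot. */
        printf("range: %ju invalidations, page: 1\n",
            (uintmax_t)(L2_SIZE / PAGE_SIZE));
        return (0);
}
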
@@ -2571,8 +2578,8 @@
pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
&free, &lock);
continue;
- } else if (pmap_demote_l2_locked(pmap, l2,
- sva &~L2_OFFSET, &lock) == NULL)
+ } else if (pmap_demote_l2_locked(pmap, l2, sva,
+ &lock) == NULL)
continue;
l3_paddr = pmap_load(l2);
}
@@ -3052,8 +3059,7 @@
if (pde != NULL && lvl == 1) {
l2 = pmap_l1_to_l2(pde, va);
if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
- (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET,
- &lock)) != NULL) {
+ (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
l3 = &l3[pmap_l3_index(va)];
if (va < VM_MAXUSER_ADDRESS) {
mpte = PHYS_TO_VM_PAGE(
@@ -3391,7 +3397,7 @@
lockp) != 0)
break;
}
- vm_page_free_pages_toq(&free, true);
+ vm_page_free_pages_toq(&free, false);
if (va >= VM_MAXUSER_ADDRESS) {
/*
* Both pmap_remove_l2() and pmap_remove_l3() will
@@ -3419,7 +3425,7 @@
* Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
- vm_page_free_pages_toq(&free, true);
+ vm_page_free_pages_toq(&free, false);
}
CTR2(KTR_PMAP,
"pmap_enter_l2: failure for va %#lx in pmap %p",
@@ -4331,8 +4337,7 @@
va = pv->pv_va;
pte = pmap_pte(pmap, pv->pv_va, &lvl);
if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
- pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET,
- &lock);
+ (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
("inconsistent pv lock %p %p for page %p",
lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
@@ -4906,7 +4911,7 @@
pte = pmap_l1_to_l2(pte, tmpva);
case 2:
newpte = pmap_demote_l2(kernel_pmap, pte,
- tmpva & ~L2_OFFSET);
+ tmpva);
if (newpte == NULL)
return (EINVAL);
pte = pmap_l2_to_l3(pte, tmpva);
@@ -5005,6 +5010,18 @@
return (l2);
}
+static void
+pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
+ struct rwlock **lockp)
+{
+ struct spglist free;
+
+ SLIST_INIT(&free);
+ (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
+ lockp);
+ vm_page_free_pages_toq(&free, false);
+}
+
/*
* Create an L3 table to map all addresses within an L2 mapping.
*/
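
The new pmap_demote_l2_abort() helper collects the pages released by pmap_remove_l2() on an spglist and frees them in a batch afterwards via vm_page_free_pages_toq(). A self-contained sketch of that deferred-free idiom using <sys/queue.h>, with a simplified stand-in for vm_page:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Simplified stand-in for vm_page_t; the kernel's carries far more. */
struct page {
        int pindex;
        SLIST_ENTRY(page) plinks;
};
SLIST_HEAD(spglist, page);

int
main(void)
{
        struct spglist freeq;
        struct page *m;
        int i;

        SLIST_INIT(&freeq);

        /* The removal path queues pages instead of freeing in place. */
        for (i = 0; i < 3; i++) {
                if ((m = malloc(sizeof(*m))) == NULL)
                        return (1);
                m->pindex = i;
                SLIST_INSERT_HEAD(&freeq, m, plinks);
        }

        /* Afterwards, the whole batch is released in one pass. */
        while ((m = SLIST_FIRST(&freeq)) != NULL) {
                SLIST_REMOVE_HEAD(&freeq, plinks);
                printf("freeing page %d\n", m->pindex);
                free(m);
        }
        return (0);
}
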
@@ -5023,8 +5040,7 @@
oldl2 = pmap_load(l2);
KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
("pmap_demote_l2: Demoting a non-block entry"));
- KASSERT((va & L2_OFFSET) == 0,
- ("pmap_demote_l2: Invalid virtual address %#lx", va));
+ va &= ~L2_OFFSET;
tmpl2 = 0;
if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
@@ -5033,15 +5049,57 @@
return (NULL);
}
+ /*
+ * Invalidate the 2MB page mapping and return "failure" if the
+ * mapping was never accessed.
+ */
+ if ((oldl2 & ATTR_AF) == 0) {
+ KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
+ ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
+ pmap_demote_l2_abort(pmap, va, l2, lockp);
+ CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
+ va, pmap);
+ goto fail;
+ }
+
if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
+ KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
+ ("pmap_demote_l2: page table page for a wired mapping"
+ " is missing"));
+
+ /*
+ * If the page table page is missing and the mapping
+ * is for a kernel address, the mapping must belong to
+ * the direct map. Page table pages are preallocated
+ * for every other part of the kernel address space,
+ * so the direct map region is the only part of the
+ * kernel address space that must be handled here.
+ */
+ KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
+ ("pmap_demote_l2: No saved mpte for va %#lx", va));
+
+ /*
+ * If the 2MB page mapping belongs to the direct map
+ * region of the kernel's address space, then the page
+ * allocation request specifies the highest possible
+ * priority (VM_ALLOC_INTERRUPT). Otherwise, the
+ * priority is normal.
+ */
ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
(VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+
+ /*
+ * If the allocation of the new page table page fails,
+ * invalidate the 2MB page mapping and return "failure".
+ */
if (ml3 == NULL) {
+ pmap_demote_l2_abort(pmap, va, l2, lockp);
CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
" in pmap %p", va, pmap);
goto fail;
}
+
if (va < VM_MAXUSER_ADDRESS) {
ml3->wire_count = NL3PG;
pmap_resident_count_inc(pmap, 1);
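
The abort path added in the hunk above fires when the 2MB mapping was never accessed, which pmap_demote_l2() detects from the Access Flag in the block descriptor. A self-contained sketch of that test, with the descriptor bits written out by hand (values per the ARMv8 VMSA; the kernel defines the real ones in pte.h):

#include <stdint.h>
#include <stdio.h>

#define ATTR_DESCR_MASK UINT64_C(0x3)
#define L2_BLOCK        UINT64_C(0x1)           /* bits[1:0] = 01: block */
#define ATTR_AF         (UINT64_C(1) << 10)     /* Access Flag, bit 10 */

int
main(void)
{
        uint64_t oldl2 = UINT64_C(0x40200000) | L2_BLOCK;       /* AF clear */

        if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK && (oldl2 & ATTR_AF) == 0)
                printf("2MB block never accessed: abort the demotion\n");
        return (0);
}
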
@@ -5090,6 +5148,10 @@
if ((oldl2 & ATTR_SW_MANAGED) != 0)
reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
+ /*
+ * Pass PAGE_SIZE so that a single TLB invalidation is performed on
+ * the 2MB page mapping.
+ */
pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
/*
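
On the final hunk's comment: pmap_update_entry()'s size argument determines how many page-sized invalidations its break-before-make sequence issues, so PAGE_SIZE means one invalidation rather than L2_SIZE / PAGE_SIZE = 512. That is safe here because the old entry is a single 2MB block and can occupy at most one TLB entry. A sketch of the assumed break-before-make shape (illustrative only; the real function also disables interrupts and issues the required barriers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       UINT64_C(4096)

/* Stand-in for a per-page TLB invalidation. */
static void
example_invalidate_page(uint64_t va)
{
        printf("tlbi %#jx\n", (uintmax_t)va);
}

/* Assumed shape of pmap_update_entry(); not the kernel's implementation. */
static void
example_update_entry(uint64_t *pte, uint64_t newpte, uint64_t va,
    uint64_t size)
{
        uint64_t tmpva;

        *pte = 0;                       /* break: clear the old entry */
        for (tmpva = va; tmpva < va + size; tmpva += PAGE_SIZE)
                example_invalidate_page(tmpva);
        *pte = newpte;                  /* make: install the new entry */
}

int
main(void)
{
        uint64_t l2 = 0;

        /* Passing PAGE_SIZE yields exactly one invalidation. */
        example_update_entry(&l2, UINT64_C(0x40200003),
            UINT64_C(0x40200000), PAGE_SIZE);
        return (0);
}
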
