arm64/arm64/pmap.c
[... first 91 lines elided ...]
     struct md_page *pvh;
     struct pv_chunk *pc;
     pv_entry_t pv;
     vm_offset_t va_last;
     vm_page_t m;
     int bit, field;

     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-    KASSERT((va & L2_OFFSET) == 0,
-        ("pmap_pv_demote_l2: va is not 2mpage aligned"));
     KASSERT((pa & L2_OFFSET) == 0,
         ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
     CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);

     /*
      * Transfer the 2mpage's pv entry for this mapping to the first
      * page's pv list.  Once this transfer begins, the pv list lock
      * must not be released until the last pv entry is reinstantiated.
      */
     pvh = pa_to_pvh(pa);
+    va = va & ~L2_OFFSET;
     pv = pmap_pvh_remove(pvh, pmap, va);
     KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
     m = PHYS_TO_VM_PAGE(pa);
     TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
     m->md.pv_gen++;
     /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
     PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
     va_last = va + L2_SIZE - PAGE_SIZE;
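
The added `va = va & ~L2_OFFSET;` is what lets callers stop pre-aligning the address themselves: it rounds any address inside the superpage down to its 2MB boundary before the pv entries are looked up. A minimal userspace sketch of the mask arithmetic, using stand-in constants rather than the kernel's pte.h definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the arm64 constants (4KB granule, 2MB L2 blocks). */
    #define L2_SHIFT  21
    #define L2_SIZE   (UINT64_C(1) << L2_SHIFT)
    #define L2_OFFSET (L2_SIZE - 1)

    int
    main(void)
    {
        uint64_t va = 0x40212345;        /* arbitrary address in a 2MB block */
        uint64_t base = va & ~L2_OFFSET; /* round down to the block base */

        assert((base & L2_OFFSET) == 0); /* now "2mpage aligned" */
        printf("va %#jx -> base %#jx\n", (uintmax_t)va, (uintmax_t)base);
        return (0);
    }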
[... 139 lines elided ...]
     vm_offset_t eva, va;
     vm_page_t m, ml3;

     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
     KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
     old_l2 = pmap_load_clear(l2);
     KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
         ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
-    pmap_invalidate_range(pmap, sva, sva + L2_SIZE);
+
+    /*
+     * Since a promotion must break the 4KB page mappings before making
+     * the 2MB page mapping, a pmap_invalidate_page() suffices.
+     */
+    pmap_invalidate_page(pmap, sva);
     if (old_l2 & ATTR_SW_WIRED)
         pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
     pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
     if (old_l2 & ATTR_SW_MANAGED) {
         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
         pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
         pmap_pvh_free(pvh, pmap, sva);
         eva = sva + L2_SIZE;
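
The replacement leans on an ordering invariant: a promotion only installs a 2MB block after the 512 4KB entries beneath it were broken, so the TLB can hold at most the one block entry for this range and a single invalidation is enough. A toy userspace model that only counts invalidation operations, with stand-in constants and no real TLB semantics:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE UINT64_C(4096)
    #define L2_SIZE   (UINT64_C(2) * 1024 * 1024)

    static unsigned long ninval;

    /* Model of a per-page range invalidation: one operation per page. */
    static void
    invalidate_range(uint64_t sva, uint64_t eva)
    {
        for (uint64_t va = sva; va < eva; va += PAGE_SIZE)
            ninval++;
    }

    /* Model of a single-entry invalidation. */
    static void
    invalidate_page(uint64_t va)
    {
        (void)va;
        ninval++;
    }

    int
    main(void)
    {
        invalidate_range(0, L2_SIZE);
        printf("range: %lu operations\n", ninval); /* 512 */
        ninval = 0;
        invalidate_page(0);
        printf("page:  %lu operations\n", ninval); /* 1 */
        return (0);
    }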
[... 121 lines elided ...]
         l3_paddr = pmap_load(l2);

         if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
             if (sva + L2_SIZE == va_next && eva >= va_next) {
                 pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
                     &free, &lock);
                 continue;
-            } else if (pmap_demote_l2_locked(pmap, l2,
-                sva &~L2_OFFSET, &lock) == NULL)
+            } else if (pmap_demote_l2_locked(pmap, l2, sva,
+                &lock) == NULL)
                 continue;
             l3_paddr = pmap_load(l2);
         }

         /*
          * Weed out invalid mappings.
          */
         if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
[... 182 lines elided ...]
             flags, m, &lock);
         goto out;
     }
     pde = pmap_pde(pmap, va, &lvl);
     if (pde != NULL && lvl == 1) {
         l2 = pmap_l1_to_l2(pde, va);
         if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
-            (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET,
-            &lock)) != NULL) {
+            (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
             l3 = &l3[pmap_l3_index(va)];
             if (va < VM_MAXUSER_ADDRESS) {
                 mpte = PHYS_TO_VM_PAGE(
                     pmap_load(l2) & ~ATTR_MASK);
                 mpte->wire_count++;
             }
             goto havel3;
         }
[... 182 lines elided ...]
         else
             for (sva = va; sva < va + L2_SIZE; sva += PAGE_SIZE) {
                 l3 = pmap_l2_to_l3(l2, sva);
                 if (pmap_l3_valid(pmap_load(l3)) &&
                     pmap_remove_l3(pmap, l3, sva, old_l2, &free,
                     lockp) != 0)
                     break;
             }
-        vm_page_free_pages_toq(&free, true);
+        vm_page_free_pages_toq(&free, false);
alc: This was essentially a cut-and-paste error in porting pmap_enter_pde() from amd64. To avoid another similar error, I think that a followup change should remove the wired page count updates from the page table page deallocation code so that we would correctly pass "true" here. That would also reduce the number of atomic ops performed.
markj: Note that v_wire_count is now a per-CPU counter, and not updated with atomics.
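
markj's note refers to counter(9): v_wire_count is kept in per-CPU slots that are updated locally and only summed on read, so updates are already cheap without the followup alc describes. A kernel-style sketch of that API's shape (this builds only as part of a kernel or module; the counter name and values here are hypothetical):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/counter.h>
    #include <sys/malloc.h>

    static counter_u64_t demo_wired_cnt;   /* hypothetical counter */

    static void
    demo_counter_usage(void)
    {
        demo_wired_cnt = counter_u64_alloc(M_WAITOK);

        /* Each add hits the current CPU's slot: no atomics, no lock. */
        counter_u64_add(demo_wired_cnt, 512);   /* e.g. L2_SIZE / PAGE_SIZE */

        /* Reads sum all per-CPU slots and may race with writers. */
        printf("wired: %ju\n", (uintmax_t)counter_u64_fetch(demo_wired_cnt));

        counter_u64_free(demo_wired_cnt);
    }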
         if (va >= VM_MAXUSER_ADDRESS) {
             /*
              * Both pmap_remove_l2() and pmap_remove_l3() will
              * leave the kernel page table page zero filled.
              */
             mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
             if (pmap_insert_pt_page(pmap, mt, false))
                 panic("pmap_enter_l2: trie insert failed");
[... 11 lines elided ...]
         if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
             /*
              * Although "va" is not mapped, paging-structure
              * caches could nonetheless have entries that
              * refer to the freed page table pages.
              * Invalidate those entries.
              */
             pmap_invalidate_page(pmap, va);
-            vm_page_free_pages_toq(&free, true);
+            vm_page_free_pages_toq(&free, false);
         }
         CTR2(KTR_PMAP,
             "pmap_enter_l2: failure for va %#lx in pmap %p",
             va, pmap);
         return (KERN_RESOURCE_SHORTAGE);
     }
     if ((new_l2 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
         for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
[... 182 lines elided ...]
                 PMAP_UNLOCK(pmap);
                 rw_wunlock(lock);
                 goto retry_pv_loop;
             }
         }
         va = pv->pv_va;
         pte = pmap_pte(pmap, pv->pv_va, &lvl);
         if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
-            pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET,
-                &lock);
+            (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
         KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
             ("inconsistent pv lock %p %p for page %p",
             lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
         PMAP_UNLOCK(pmap);
     }
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
         pmap = PV_PMAP(pv);
         if (!PMAP_TRYLOCK(pmap)) {
[... 182 lines elided ...]
         case 1:
             newpte = pmap_demote_l1(kernel_pmap, pte,
                 tmpva & ~L1_OFFSET);
             if (newpte == NULL)
                 return (EINVAL);
             pte = pmap_l1_to_l2(pte, tmpva);
         case 2:
-            newpte = pmap_demote_l2(kernel_pmap, pte,
-                tmpva & ~L2_OFFSET);
+            newpte = pmap_demote_l2(kernel_pmap, pte,
+                tmpva);
             if (newpte == NULL)
                 return (EINVAL);
             pte = pmap_l2_to_l3(pte, tmpva);
         case 3:
             /* Update the entry */
             l3 = pmap_load(pte);
             l3 &= ~ATTR_IDX_MASK;
             l3 |= ATTR_IDX(mode);
[... 82 lines elided ...]
     if (tmpl1 != 0) {
         pmap_kremove(tmpl1);
         kva_free(tmpl1, PAGE_SIZE);
     }

     return (l2);
 }

+static void
+pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
+    struct rwlock **lockp)
+{
+    struct spglist free;
+
+    SLIST_INIT(&free);
+    (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
+        lockp);
+    vm_page_free_pages_toq(&free, false);
+}
+
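pmap_demote_l2_abort() reuses the pmap's batch-free idiom: the removal path collects the no-longer-needed page table pages on a caller-owned spglist, and they are freed in one pass afterwards. A minimal userspace sketch of that idiom built on the same queue(3) SLIST macros (the page structure and free routine are stand-ins, not the kernel's):

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for vm_page_t; the kernel links real vm_page structures. */
    struct page {
        int pindex;
        SLIST_ENTRY(page) plinks;
    };
    SLIST_HEAD(spglist, page);

    /* Stand-in for vm_page_free_pages_toq(): drain the list in one pass. */
    static void
    free_pages_toq(struct spglist *spgl)
    {
        struct page *m;

        while ((m = SLIST_FIRST(spgl)) != NULL) {
            SLIST_REMOVE_HEAD(spgl, plinks);
            printf("freeing page %d\n", m->pindex);
            free(m);
        }
    }

    int
    main(void)
    {
        struct spglist freeq;
        struct page *m;

        SLIST_INIT(&freeq);
        for (int i = 0; i < 3; i++) {   /* "removal" accumulates pages */
            m = malloc(sizeof(*m));
            m->pindex = i;
            SLIST_INSERT_HEAD(&freeq, m, plinks);
        }
        free_pages_toq(&freeq);         /* batch free at the end */
        return (0);
    }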
 /*
  * Create an L3 table to map all addresses within an L2 mapping.
  */
 static pt_entry_t *
 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
     struct rwlock **lockp)
 {
     pt_entry_t *l3, newl3, oldl2;
     vm_offset_t tmpl2;
     vm_paddr_t l3phys, phys;
     vm_page_t ml3;
     int i;

     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
     l3 = NULL;
     oldl2 = pmap_load(l2);
     KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
         ("pmap_demote_l2: Demoting a non-block entry"));
-    KASSERT((va & L2_OFFSET) == 0,
-        ("pmap_demote_l2: Invalid virtual address %#lx", va));
+    va &= ~L2_OFFSET;
     tmpl2 = 0;
     if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
         tmpl2 = kva_alloc(PAGE_SIZE);
         if (tmpl2 == 0)
             return (NULL);
     }
+    /*
+     * Invalidate the 2MB page mapping and return "failure" if the
+     * mapping is not wired and was never accessed.  (Unlike amd64 and
+     * i386, arm64's pmap_enter() does not preset ATTR_AF on wired
+     * mappings.)
+     */
+    if ((oldl2 & (ATTR_SW_WIRED | ATTR_AF)) == 0) {
+        pmap_demote_l2_abort(pmap, va, l2, lockp);
+        CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
+            va, pmap);
+        goto fail;
+    }

andrew: Is this comment correct? pmap_enter always sets ATTR_AF via ATTR_DEFAULT.

alc (author): No, the comment is incorrect. Thank you for pointing this out. I will update the patch accordingly. As an aside, the functions that create "speculative" mappings, in other words, mappings not created as the direct result of an access, for example, pmap_enter_object() and pmap_enter_quick(), are also using ATTR_DEFAULT and thereby incorrectly marking the pages as referenced.
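
The disagreement turns on what pmap_enter() puts in a new PTE by default. A hedged sketch of the relationship andrew is pointing at, assuming pte.h definitions of roughly this shape (the bit positions follow the ARMv8 descriptor format, but treat the exact macros as illustrative, not the real headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative arm64 PTE attribute bits (assumed, not the headers). */
    #define ATTR_AF      (UINT64_C(1) << 10)   /* hardware Access Flag */
    #define ATTR_SH_IS   3
    #define ATTR_SH(x)   ((uint64_t)(x) << 8)
    /* ATTR_DEFAULT is assumed to fold in ATTR_AF, which is andrew's point. */
    #define ATTR_DEFAULT (ATTR_AF | ATTR_SH(ATTR_SH_IS))

    int
    main(void)
    {
        uint64_t new_l3 = ATTR_DEFAULT; /* what pmap_enter() starts from */

        /* Every entry built from ATTR_DEFAULT is born "accessed". */
        printf("ATTR_AF preset: %s\n",
            (new_l3 & ATTR_AF) != 0 ? "yes" : "no");
        return (0);
    }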
     if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
+        KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
+            ("pmap_demote_l2: page table page for a wired mapping"
+            " is missing"));
+
+        /*
+         * If the page table page is missing and the mapping
+         * is for a kernel address, the mapping must belong to
+         * the direct map.  Page table pages are preallocated
+         * for every other part of the kernel address space,
+         * so the direct map region is the only part of the
+         * kernel address space that must be handled here.
+         */
+        KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
+            ("pmap_demote_l2: No saved mpte for va %#lx", va));
+
+        /*
+         * If the 2MB page mapping belongs to the direct map
+         * region of the kernel's address space, then the page
+         * allocation request specifies the highest possible
+         * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
+         * priority is normal.
+         */
         ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
             (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+
+        /*
+         * If the allocation of the new page table page fails,
+         * invalidate the 2MB page mapping and return "failure".
+         */
         if (ml3 == NULL) {
+            pmap_demote_l2_abort(pmap, va, l2, lockp);
             CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
                 " in pmap %p", va, pmap);
             goto fail;
         }
         if (va < VM_MAXUSER_ADDRESS) {
             ml3->wire_count = NL3PG;
             pmap_resident_count_inc(pmap, 1);
         }
     }
     l3phys = VM_PAGE_TO_PHYS(ml3);
     l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
[... 32 lines elided ...]
      * of the L2 and the PV lists will be inconsistent, which can result
      * in reclaim_pv_chunk() attempting to remove a PV entry from the
      * wrong PV list and pmap_pv_demote_l2() failing to find the expected
      * PV entry for the 2MB page mapping that is being demoted.
      */
     if ((oldl2 & ATTR_SW_MANAGED) != 0)
         reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);

+    /*
+     * Pass PAGE_SIZE so that a single TLB invalidation is performed on
+     * the 2MB page mapping.
+     */
     pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);

     /*
      * Demote the PV entry.
      */
     if ((oldl2 & ATTR_SW_MANAGED) != 0)
         pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
[... final 91 lines elided ...]