diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4797,6 +4797,7 @@
 	struct spglist free;
 	pd_entry_t *l2, old_l2;
 	vm_page_t l2pg, mt;
+	vm_page_t uwptpg;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(ADDR_IS_CANONICAL(va),
@@ -4864,6 +4865,24 @@
 		}
 	}
 
+	/*
+	 * Allocate leaf ptpage for wired userspace pages.
+	 */
+	uwptpg = NULL;
+	if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) {
+		uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
+		if (uwptpg == NULL) {
+			return (KERN_RESOURCE_SHORTAGE);
+		}
+		uwptpg->pindex = pmap_l2_pindex(va);
+		if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
+			vm_page_unwire_noq(uwptpg);
+			vm_page_free(uwptpg);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
+		pmap_resident_count_inc(pmap, 1);
+		uwptpg->ref_count = NL3PG;
+	}
 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
 		/*
 		 * Abort this mapping if its PV entry could not be created.
@@ -4871,6 +4890,16 @@
 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
 			if (l2pg != NULL)
 				pmap_abort_ptp(pmap, va, l2pg);
+			if (uwptpg != NULL) {
+				mt = pmap_remove_pt_page(pmap, va);
+				KASSERT(mt == uwptpg,
+				    ("removed pt page %p, expected %p", mt,
+				    uwptpg));
+				pmap_resident_count_dec(pmap, 1);
+				uwptpg->ref_count = 1;
+				vm_page_unwire_noq(uwptpg);
+				vm_page_free(uwptpg);
+			}
 			CTR2(KTR_PMAP,
 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
 			    va, pmap);
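
For review context, a distilled sketch of the allocate/unwind sequence the hunks above add to pmap_enter_l2(): the leaf page table page is wired, given the L2 pindex, and inserted before the PV entry is attempted, so the failure path must remove it, drop the resident count, and reset ref_count to 1 before unwiring and freeing. The helper names below are hypothetical (invented for illustration only); every call and constant is taken verbatim from the diff.

/*
 * Sketch only, not part of the patch: hypothetical helpers isolating the
 * new allocate/undo pair for the wired-userspace leaf page table page.
 */
static vm_page_t
pmap_alloc_uwptpg(pmap_t pmap, vm_offset_t va)
{
	vm_page_t uwptpg;

	/* Pre-allocate and wire the L3 page table page. */
	uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
	if (uwptpg == NULL)
		return (NULL);
	uwptpg->pindex = pmap_l2_pindex(va);
	if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
		/* Insertion failed: drop the wiring taken above and free. */
		vm_page_unwire_noq(uwptpg);
		vm_page_free(uwptpg);
		return (NULL);
	}
	pmap_resident_count_inc(pmap, 1);
	/* The page stands in for a full L3 table's worth of mappings. */
	uwptpg->ref_count = NL3PG;
	return (uwptpg);
}

static void
pmap_undo_uwptpg(pmap_t pmap, vm_offset_t va, vm_page_t uwptpg)
{
	vm_page_t mt;

	/* Reverse the allocation when the L2 mapping cannot be completed. */
	mt = pmap_remove_pt_page(pmap, va);
	KASSERT(mt == uwptpg,
	    ("removed pt page %p, expected %p", mt, uwptpg));
	pmap_resident_count_dec(pmap, 1);
	uwptpg->ref_count = 1;
	vm_page_unwire_noq(uwptpg);
	vm_page_free(uwptpg);
}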