diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2685,6 +2685,8 @@
 	    ("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
 	if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
 	    NULL) {
+		KASSERT((oldl2 & PTE_SW_WIRED) == 0,
+		    ("pmap_demote_l2_locked: page table page for a wired mapping is missing"));
 		if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
 		    VM_ALLOC_WIRED)) == NULL) {
@@ -3217,6 +3219,7 @@
 	pd_entry_t *l2, *l3, oldl2;
 	vm_offset_t sva;
 	vm_page_t l2pg, mt;
+	vm_page_t uwptpg;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
@@ -3274,6 +3277,24 @@
 		    ("pmap_enter_l2: non-zero L2 entry %p", l2));
 	}
 
+	/*
+	 * Allocate leaf ptpage for wired userspace pages.
+	 */
+	uwptpg = NULL;
+	if ((new_l2 & PTE_SW_WIRED) != 0 && pmap != kernel_pmap) {
+		uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
+		if (uwptpg == NULL) {
+			return (KERN_RESOURCE_SHORTAGE);
+		}
+		uwptpg->pindex = pmap_l2_pindex(va);
+		if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
+			vm_page_unwire_noq(uwptpg);
+			vm_page_free(uwptpg);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
+		pmap_resident_count_inc(pmap, 1);
+		uwptpg->ref_count = Ln_ENTRIES;
+	}
 	if ((new_l2 & PTE_SW_MANAGED) != 0) {
 		/*
 		 * Abort this mapping if its PV entry could not be created.
@@ -3290,6 +3311,16 @@
 			pmap_invalidate_page(pmap, va);
 			vm_page_free_pages_toq(&free, true);
 		}
+		if (uwptpg != NULL) {
+			mt = pmap_remove_pt_page(pmap, va);
+			KASSERT(mt == uwptpg,
+			    ("removed pt page %p, expected %p", mt,
+			    uwptpg));
+			pmap_resident_count_dec(pmap, 1);
+			uwptpg->ref_count = 1;
+			vm_page_unwire_noq(uwptpg);
+			vm_page_free(uwptpg);
+		}
 		CTR2(KTR_PMAP,
 		    "pmap_enter_l2: failed to create PV entry"
 		    " for va %#lx in pmap %p", va, pmap);
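
Purely as an illustration of the ordering the patch relies on (pre-allocate and insert the wired leaf page table page before anything else can fail, then remove, unwire, and free it if PV entry allocation fails later), here is a minimal userspace C sketch of that allocate-then-unwind pattern. It is not part of the patch; every name in it (mock_page, mock_enter_l2, and so on) is a hypothetical stand-in, not a kernel API.

/*
 * Sketch only: mirrors the uwptpg control flow in pmap_enter_l2() with
 * mock helpers standing in for vm_page_alloc_noobj(), pmap_insert_pt_page()
 * and pmap_remove_pt_page().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define	MOCK_KERN_SUCCESS		0
#define	MOCK_KERN_RESOURCE_SHORTAGE	1

struct mock_page {
	int	ref_count;
};

/* Stand-in for the pmap's radix trie of page table pages. */
static struct mock_page *trie_slot;

static int
mock_insert_pt_page(struct mock_page *m)
{
	if (trie_slot != NULL)
		return (1);		/* Collision: caller must unwind. */
	trie_slot = m;
	return (0);
}

static struct mock_page *
mock_remove_pt_page(void)
{
	struct mock_page *m = trie_slot;

	trie_slot = NULL;
	return (m);
}

/*
 * Mirror of the patched flow: for a wired user mapping, allocate the leaf
 * page table page up front; if a later step (here, a simulated PV entry
 * allocation) fails, drop the trie entry first, then release the page.
 */
static int
mock_enter_l2(bool wired, bool pv_alloc_ok)
{
	struct mock_page *uwptpg = NULL;

	if (wired) {
		uwptpg = calloc(1, sizeof(*uwptpg));
		if (uwptpg == NULL)
			return (MOCK_KERN_RESOURCE_SHORTAGE);
		if (mock_insert_pt_page(uwptpg) != 0) {
			free(uwptpg);
			return (MOCK_KERN_RESOURCE_SHORTAGE);
		}
		uwptpg->ref_count = 512;	/* Ln_ENTRIES on riscv Sv39/Sv48. */
	}
	if (!pv_alloc_ok) {
		if (uwptpg != NULL) {
			struct mock_page *mt = mock_remove_pt_page();

			if (mt != uwptpg)	/* Same check as the KASSERT. */
				abort();
			uwptpg->ref_count = 1;
			free(uwptpg);
		}
		return (MOCK_KERN_RESOURCE_SHORTAGE);
	}
	return (MOCK_KERN_SUCCESS);
}

int
main(void)
{
	printf("wired + PV failure -> %d\n", mock_enter_l2(true, false));
	printf("wired + success    -> %d\n", mock_enter_l2(true, true));
	return (0);
}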