Index: amd64/amd64/pmap.c
===================================================================
--- amd64/amd64/pmap.c
+++ amd64/amd64/pmap.c
@@ -1053,7 +1053,7 @@
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
-static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
 	    vm_offset_t eva);
 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
@@ -1758,7 +1758,7 @@
 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
 		mpte->wire_count = 1;
 		if (i << PDRSHIFT < KERNend &&
-		    pmap_insert_pt_page(kernel_pmap, mpte))
+		    pmap_insert_pt_page(kernel_pmap, mpte, false))
 			panic("pmap_init: pmap_insert_pt_page failed");
 	}
 	PMAP_UNLOCK(kernel_pmap);
@@ -3131,10 +3131,11 @@
  * ordered by this virtual address range.
  */
 static __inline int
-pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
 	return (vm_radix_insert(&pmap->pm_root, mpte));
 }
 
@@ -4626,7 +4627,7 @@
 	 * If the page table page is not leftover from an earlier promotion,
	 * initialize it.
 	 */
-	if ((oldpde & PG_PROMOTED) == 0)
+	if (mpte->valid == 0)
 		pmap_fill_ptp(firstpte, newpte);
 
 	pmap_demote_pde_check(firstpte, newpte);
@@ -5399,7 +5400,7 @@
 	    ("pmap_promote_pde: page table page is out of range"));
 	KASSERT(mpte->pindex == pmap_pde_pindex(va),
 	    ("pmap_promote_pde: page table page's pindex is wrong"));
-	if (pmap_insert_pt_page(pmap, mpte)) {
+	if (pmap_insert_pt_page(pmap, mpte, true)) {
 		atomic_add_long(&pmap_pde_p_failures, 1);
 		CTR2(KTR_PMAP,
 		    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
@@ -5826,15 +5827,13 @@
 		}
 		vm_page_free_pages_toq(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
+			/*
+			 * Both pmap_remove_pde() and pmap_remove_ptes() will
+			 * leave the kernel page table page zero filled.
+			 */
 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
-			if (pmap_insert_pt_page(pmap, mt)) {
-				/*
-				 * XXX Currently, this can't happen because
-				 * we do not perform pmap_enter(psind == 1)
-				 * on the kernel pmap.
-				 */
+			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_pde: trie insert failed");
-			}
 		} else
 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
 			    pde));
Index: arm64/arm64/pmap.c
===================================================================
--- arm64/arm64/pmap.c
+++ arm64/arm64/pmap.c
@@ -2814,10 +2814,11 @@
  * ordered by this virtual address range.
  */
 static __inline int
-pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
 	return (vm_radix_insert(&pmap->pm_root, mpte));
 }
 
@@ -2962,7 +2963,7 @@
 	    ("pmap_promote_l2: page table page is out of range"));
 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
 	    ("pmap_promote_l2: page table page's pindex is wrong"));
-	if (pmap_insert_pt_page(pmap, mpte)) {
+	if (pmap_insert_pt_page(pmap, mpte, true)) {
 		atomic_add_long(&pmap_l2_p_failures, 1);
 		CTR2(KTR_PMAP,
 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
@@ -3386,15 +3387,13 @@
 		}
 		vm_page_free_pages_toq(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
+			/*
+			 * Both pmap_remove_l2() and pmap_remove_l3() will
+			 * leave the kernel page table page zero filled.
+			 */
 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
-			if (pmap_insert_pt_page(pmap, mt)) {
-				/*
-				 * XXX Currently, this can't happen bacuse
-				 * we do not perform pmap_enter(psind == 1)
-				 * on the kernel pmap.
-				 */
+			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
-			}
 		} else
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -5035,8 +5034,10 @@
 			    " in pmap %p", va, pmap);
 			goto fail;
 		}
-		if (va < VM_MAXUSER_ADDRESS)
+		if (va < VM_MAXUSER_ADDRESS) {
+			ml3->wire_count = NL3PG;
 			pmap_resident_count_inc(pmap, 1);
+		}
 	}
 	l3phys = VM_PAGE_TO_PHYS(ml3);
@@ -5048,10 +5049,10 @@
 	newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
 
 	/*
-	 * If the page table page is new, initialize it.
+	 * If the page table page is not leftover from an earlier promotion,
+	 * initialize it.
 	 */
-	if (ml3->wire_count == 1) {
-		ml3->wire_count = NL3PG;
+	if (ml3->valid == 0) {
 		for (i = 0; i < Ln_ENTRIES; i++) {
 			l3[i] = newl3 | phys;
 			phys += L3_SIZE;
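
Note: the reworked pmap_insert_pt_page() interface makes the caller state,
via the new promoted argument, whether the page table page being stashed in
the pmap's radix trie still holds the valid PTEs left behind by a promotion
(valid = VM_PAGE_BITS_ALL) or is zero filled (valid = 0).  Demotion then
keys off mpte->valid rather than the PG_PROMOTED bit in the PDE.  The sketch
below is a minimal userland illustration of that contract, not kernel code:
fake_vm_page, insert_pt_page(), and demote() are simplified stand-ins for
struct vm_page, pmap_insert_pt_page(), and the demotion fill step.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define VM_PAGE_BITS_ALL 0xff	/* stand-in for the real valid mask */
    #define NPTEPG 512			/* PTEs per page table page (amd64) */

    struct fake_vm_page {		/* simplified stand-in for vm_page */
    	unsigned char valid;		/* VM_PAGE_BITS_ALL: leftover PTEs */
    	unsigned long pte[NPTEPG];
    };

    /*
     * Mirrors the patched pmap_insert_pt_page(): the caller states whether
     * the page is being saved by a promotion (still full of valid PTEs) or
     * is a zero-filled page being stashed for later reuse.
     */
    static void
    insert_pt_page(struct fake_vm_page *mpte, bool promoted)
    {
    	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
    }

    /*
     * Mirrors the demotion-side test: only a page that is not leftover
     * from an earlier promotion needs to be (re)filled with PTEs.
     */
    static void
    demote(struct fake_vm_page *mpte, unsigned long newpte)
    {
    	if (mpte->valid == 0)
    		for (int i = 0; i < NPTEPG; i++)
    			mpte->pte[i] = newpte + i;
    }

    int
    main(void)
    {
    	struct fake_vm_page pt = { 0 };

    	insert_pt_page(&pt, false);	/* zero filled, as after pmap_remove_pde() */
    	demote(&pt, 100);
    	assert(pt.pte[1] == 101);	/* demotion had to fill it */

    	insert_pt_page(&pt, true);	/* leftover from a promotion */
    	pt.pte[1] = 7;
    	demote(&pt, 200);
    	assert(pt.pte[1] == 7);		/* demotion reused the old contents */
    	printf("ok\n");
    	return (0);
    }

The design point is that the "leftover from a promotion" state follows the
page itself instead of the superpage mapping, so a zero-filled kernel page
table page can also be stashed in the trie and later reused without a
redundant fill.
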
+			 */
 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
-			if (pmap_insert_pt_page(pmap, mt)) {
-				/*
-				 * XXX Currently, this can't happen bacuse
-				 * we do not perform pmap_enter(psind == 1)
-				 * on the kernel pmap.
-				 */
+			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
-			}
 		} else
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
Index: i386/i386/pmap.c
===================================================================
--- i386/i386/pmap.c
+++ i386/i386/pmap.c
@@ -318,7 +318,7 @@
 		    u_int flags, vm_page_t m);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
-static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
 	    pd_entry_t pde);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
@@ -992,7 +992,7 @@
 		mpte->wire_count = 1;
 		if (pseflag != 0 &&
 		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
-		    pmap_insert_pt_page(kernel_pmap, mpte))
+		    pmap_insert_pt_page(kernel_pmap, mpte, false))
 			panic("pmap_init: pmap_insert_pt_page failed");
 	}
 	PMAP_UNLOCK(kernel_pmap);
@@ -1902,10 +1902,11 @@
  * ordered by this virtual address range.
  */
 static __inline int
-pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
 	return (vm_radix_insert(&pmap->pm_root, mpte));
 }
 
@@ -2823,7 +2824,7 @@
 	 * If the page table page is not leftover from an earlier promotion,
 	 * initialize it.
 	 */
-	if ((oldpde & PG_PROMOTED) == 0)
+	if (mpte->valid == 0)
 		pmap_fill_ptp(firstpte, newpte);
 
 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
@@ -3533,7 +3534,7 @@
 	    ("pmap_promote_pde: page table page is out of range"));
 	KASSERT(mpte->pindex == va >> PDRSHIFT,
 	    ("pmap_promote_pde: page table page's pindex is wrong"));
-	if (pmap_insert_pt_page(pmap, mpte)) {
+	if (pmap_insert_pt_page(pmap, mpte, true)) {
 		pmap_pde_p_failures++;
 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x in pmap %p",
 		    va,
@@ -3911,15 +3912,13 @@
 		}
 		vm_page_free_pages_toq(&free, true);
 		if (pmap == kernel_pmap) {
+			/*
+			 * Both pmap_remove_pde() and pmap_remove_ptes() will
+			 * leave the kernel page table page zero filled.
+			 */
 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
-			if (pmap_insert_pt_page(pmap, mt)) {
-				/*
-				 * XXX Currently, this can't happen because
-				 * we do not perform pmap_enter(psind == 1)
-				 * on the kernel pmap.
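
Note: in the arm64 hunks above and the riscv hunks below, the demotion code
used to recognize a freshly allocated page table page by wire_count == 1 and
only then raise the count to NL3PG (Ln_ENTRIES on riscv) and fill the page.
Once zero-filled pages can sit in the trie with a full wire count, that test
no longer identifies the pages that need initialization, so the wire count is
now set at allocation and the fill decision tests valid instead.  A small
stand-alone sketch of why the old test breaks; fake_ptp is a hypothetical
stand-in for the page table page's vm_page:

    #include <assert.h>
    #include <stdbool.h>

    #define NL3PG 512	/* L3 entries per page table page (arm64) */

    struct fake_ptp {	/* simplified stand-in for the PTP's vm_page */
    	int wire_count;
    	unsigned char valid;
    };

    /*
     * The old test, "wire_count == 1 means the page is new and must be
     * filled", breaks once zero-filled kernel PTPs can be stashed in the
     * trie with a full wire count: such a page still needs filling on
     * demotion.
     */
    static bool
    needs_fill_old(struct fake_ptp *p)
    {
    	return (p->wire_count == 1);
    }

    static bool
    needs_fill_new(struct fake_ptp *p)
    {
    	return (p->valid == 0);
    }

    int
    main(void)
    {
    	/* A zero-filled kernel PTP reclaimed from the trie. */
    	struct fake_ptp stashed = { .wire_count = NL3PG, .valid = 0 };

    	assert(!needs_fill_old(&stashed));	/* old test: wrongly skips the fill */
    	assert(needs_fill_new(&stashed));	/* new test: fills as required */
    	return (0);
    }
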
-				 */
+			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_pde: trie insert failed");
-			}
 		} else
 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
 			    pde));
Index: riscv/riscv/pmap.c
===================================================================
--- riscv/riscv/pmap.c
+++ riscv/riscv/pmap.c
@@ -1106,10 +1106,11 @@
  * ordered by this virtual address range.
  */
 static __inline int
-pmap_insert_pt_page(pmap_t pmap, vm_page_t ml3)
+pmap_insert_pt_page(pmap_t pmap, vm_page_t ml3, bool promoted)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	ml3->valid = promoted ? VM_PAGE_BITS_ALL : 0;
 	return (vm_radix_insert(&pmap->pm_root, ml3));
 }
 
@@ -2482,8 +2483,10 @@
 			    "failure for va %#lx in pmap %p", va, pmap);
 			return (false);
 		}
-		if (va < VM_MAXUSER_ADDRESS)
+		if (va < VM_MAXUSER_ADDRESS) {
+			mpte->wire_count = Ln_ENTRIES;
 			pmap_resident_count_inc(pmap, 1);
+		}
 	}
 	mptepa = VM_PAGE_TO_PHYS(mpte);
 	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
@@ -2495,10 +2498,10 @@
 	newl3 = oldl2;
 
 	/*
-	 * If the page table page is new, initialize it.
+	 * If the page table page is not leftover from an earlier promotion,
+	 * initialize it.
 	 */
-	if (mpte->wire_count == 1) {
-		mpte->wire_count = Ln_ENTRIES;
+	if (mpte->valid == 0) {
 		for (i = 0; i < Ln_ENTRIES; i++)
 			pmap_store(firstl3 + i, newl3 + (i << PTE_PPN0_S));
 	}
@@ -2589,7 +2592,7 @@
 	ml3 = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
 	KASSERT(ml3->pindex == pmap_l2_pindex(va),
 	    ("pmap_promote_l2: page table page's pindex is wrong"));
-	if (pmap_insert_pt_page(pmap, ml3)) {
+	if (pmap_insert_pt_page(pmap, ml3, true)) {
 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
 		    va, pmap);
 		atomic_add_long(&pmap_l2_p_failures, 1);
@@ -2972,15 +2975,13 @@
 		}
 		vm_page_free_pages_toq(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
+			/*
+			 * Both pmap_remove_l2() and pmap_remove_l3() will
+			 * leave the kernel page table page zero filled.
+			 */
 			mt = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
-			if (pmap_insert_pt_page(pmap, mt)) {
-				/*
-				 * XXX Currently, this can't happen bacuse
-				 * we do not perform pmap_enter(psind == 1)
-				 * on the kernel pmap.
-				 */
+			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
-			}
 		} else
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
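
Note: the deleted XXX comments explained that the kernel-pmap branch was
unreachable because pmap_enter(psind == 1) was not performed on the kernel
pmap.  The replacement comments instead document the invariant the branch
relies on when it is reached: pmap_remove_pde() and pmap_remove_ptes()
(pmap_remove_l2() and pmap_remove_l3() on arm64 and riscv) leave the kernel
page table page zero filled, which is why the page is reinserted with
promoted = false.  A minimal stand-alone check of that zero-fill
precondition, using simplified stand-in types:

    #include <assert.h>
    #include <stdbool.h>
    #include <string.h>

    #define NPTEPG 512	/* PTEs per page table page; a stand-in constant */

    /*
     * The precondition behind pmap_insert_pt_page(pmap, mt, false) in the
     * kernel superpage path: the page table page being stashed must already
     * be zero filled, as the remove routines are documented to leave it.
     */
    static bool
    ptp_is_zero_filled(const unsigned long *pte)
    {
    	for (int i = 0; i < NPTEPG; i++)
    		if (pte[i] != 0)
    			return (false);
    	return (true);
    }

    int
    main(void)
    {
    	unsigned long pte[NPTEPG];

    	/* Model pmap_remove_ptes()/pmap_remove_l3() zeroing the page. */
    	memset(pte, 0, sizeof(pte));
    	assert(ptp_is_zero_filled(pte));	/* safe to stash, promoted = false */
    	return (0);
    }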