Index: mips/mips/pmap.c
===================================================================
--- mips/mips/pmap.c
+++ mips/mips/pmap.c
@@ -2037,6 +2037,8 @@
 	if (is_kernel_pmap(pmap))
 		newpte |= PTE_G;
 	PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PTE_MANAGED;
 
 	mpte = NULL;
 
@@ -2066,8 +2068,11 @@
 		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
 		    (void *)pmap->pm_segtab, (void *)va);
 	}
-	om = NULL;
+
 	origpte = *pte;
+	KASSERT(!pte_test(&origpte, PTE_D | PTE_RO | PTE_V),
+	    ("pmap_enter: modified page not writable: va: %p, pte: %#jx",
+	    (void *)va, (uintmax_t)origpte));
 	opa = TLBLO_PTE_TO_PA(origpte);
 
 	/*
@@ -2086,10 +2091,6 @@
 		    PTE_W))
 			pmap->pm_stats.wired_count--;
 
-		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
-		    ("%s: modified page not writable: va: %p, pte: %#jx",
-		    __func__, (void *)va, (uintmax_t)origpte));
-
 		/*
 		 * Remove extra pte reference
 		 */
@@ -2098,8 +2099,6 @@
 
 		if (pte_test(&origpte, PTE_MANAGED)) {
 			m->md.pv_flags |= PV_TABLE_REF;
-			om = m;
-			newpte |= PTE_MANAGED;
 			if (!pte_test(&newpte, PTE_RO))
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
@@ -2113,13 +2112,29 @@
 	 * handle validating new mapping.
 	 */
 	if (opa) {
+		if (is_kernel_pmap(pmap))
+			*pte = PTE_G;
+		else
+			*pte = 0;
 		if (pte_test(&origpte, PTE_W))
 			pmap->pm_stats.wired_count--;
-
 		if (pte_test(&origpte, PTE_MANAGED)) {
 			om = PHYS_TO_VM_PAGE(opa);
+			if (pte_test(&origpte, PTE_D))
+				vm_page_dirty(om);
+			if ((om->md.pv_flags & PV_TABLE_REF) != 0) {
+				om->md.pv_flags &= ~PV_TABLE_REF;
+				vm_page_aflag_set(om, PGA_REFERENCED);
+			}
 			pv = pmap_pvh_remove(&om->md, pmap, va);
+			if (!pte_test(&newpte, PTE_MANAGED))
+				free_pv_entry(pmap, pv);
+			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			    TAILQ_EMPTY(&om->md.pv_list))
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		}
+		pmap_invalidate_page(pmap, va);
+		origpte = 0;
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			KASSERT(mpte->wire_count > 0,
@@ -2132,17 +2147,16 @@
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if (pte_test(&newpte, PTE_MANAGED)) {
 		m->md.pv_flags |= PV_TABLE_REF;
-		if (pv == NULL)
+		if (pv == NULL) {
 			pv = get_pv_entry(pmap, FALSE);
-		pv->pv_va = va;
+			pv->pv_va = va;
+		}
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
-		newpte |= PTE_MANAGED;
 		if (!pte_test(&newpte, PTE_RO))
 			vm_page_aflag_set(m, PGA_WRITEABLE);
-	} else if (pv != NULL)
-		free_pv_entry(pmap, pv);
+	}
 
 	/*
 	 * Increment counters
@@ -2163,21 +2177,11 @@
 	if (origpte != newpte) {
 		*pte = newpte;
 		if (pte_test(&origpte, PTE_V)) {
-			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
-				if (om->md.pv_flags & PV_TABLE_REF)
-					vm_page_aflag_set(om, PGA_REFERENCED);
-				om->md.pv_flags &= ~PV_TABLE_REF;
-			}
+			KASSERT(opa == pa, ("pmap_enter: invalid update"));
 			if (pte_test(&origpte, PTE_D)) {
-				KASSERT(!pte_test(&origpte, PTE_RO),
-				    ("pmap_enter: modified page not writable:"
-				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
 				if (pte_test(&origpte, PTE_MANAGED))
-					vm_page_dirty(om);
+					vm_page_dirty(m);
 			}
-			if (pte_test(&origpte, PTE_MANAGED) &&
-			    TAILQ_EMPTY(&om->md.pv_list))
-				vm_page_aflag_clear(om, PGA_WRITEABLE);
 			pmap_update_page(pmap, va, newpte);
 		}
 	}