Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -3636,7 +3636,6 @@
 	}
 
 	pa = VM_PAGE_TO_PHYS(m);
-	om = NULL;
 	origpte = *pte;
 	opa = origpte & PG_FRAME;
 
@@ -3661,10 +3660,8 @@
 		if (mpte)
 			mpte->wire_count--;
 
-		if (origpte & PG_MANAGED) {
-			om = m;
+		if (origpte & PG_MANAGED)
 			pa |= PG_MANAGED;
-		}
 		goto validate;
 	}
 
@@ -3672,15 +3669,42 @@
 
 	/*
 	 * Mapping has changed, invalidate old range and fall through to
-	 * handle validating new mapping.
+	 * handle validating new mapping.  This ensures that all threads
+	 * sharing the pmap keep a consistent view of the mapping, which is
+	 * necessary for the correct handling of COW faults.  It
+	 * also permits reuse of the old mapping's PV entry,
+	 * avoiding an allocation.
+	 *
+	 * For consistency, handle unmanaged mappings the same way.
 	 */
 	if (opa) {
+		origpte = pte_load_clear(pte);
+		KASSERT((origpte & PG_FRAME) == opa,
+		    ("pmap_enter: unexpected pa update for %#x", va));
 		if (origpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (origpte & PG_MANAGED) {
 			om = PHYS_TO_VM_PAGE(opa);
+
+			/*
+			 * The pmap lock is sufficient to synchronize with
+			 * concurrent calls to pmap_page_test_mappings() and
+			 * pmap_ts_referenced().
+			 */
+			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+				vm_page_dirty(om);
+			if ((origpte & PG_A) != 0)
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			pv = pmap_pvh_remove(&om->md, pmap, va);
+			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			    TAILQ_EMPTY(&om->md.pv_list) &&
+			    ((om->flags & PG_FICTITIOUS) != 0 ||
+			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		}
+		if ((origpte & PG_A) != 0)
+			pmap_invalidate_page(pmap, va);
+		origpte = 0;
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			KASSERT(mpte->wire_count > 0,
@@ -3697,9 +3721,10 @@
 		KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
 		    va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
-		if (pv == NULL)
+		if (pv == NULL) {
 			pv = get_pv_entry(pmap, FALSE);
-		pv->pv_va = va;
+			pv->pv_va = va;
+		}
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 		pa |= PG_MANAGED;
 	} else if (pv != NULL)
@@ -3741,28 +3766,19 @@
 		if (origpte & PG_V) {
 			invlva = FALSE;
 			origpte = pte_load_store(pte, newpte);
-			if (origpte & PG_A) {
-				if (origpte & PG_MANAGED)
-					vm_page_aflag_set(om, PGA_REFERENCED);
-				if (opa != VM_PAGE_TO_PHYS(m))
-					invlva = TRUE;
-#if defined(PAE) || defined(PAE_TABLES)
-				if ((origpte & PG_NX) == 0 &&
-				    (newpte & PG_NX) != 0)
-					invlva = TRUE;
-#endif
-			}
-			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+			KASSERT((origpte & PG_FRAME) == VM_PAGE_TO_PHYS(m),
+			    ("pmap_enter: unexpected pa update for %#x", va));
+			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW) &&
+			    (newpte & PG_M) == 0) {
 				if ((origpte & PG_MANAGED) != 0)
-					vm_page_dirty(om);
-				if ((prot & VM_PROT_WRITE) == 0)
-					invlva = TRUE;
+					vm_page_dirty(m);
+				invlva = TRUE;
 			}
-			if ((origpte & PG_MANAGED) != 0 &&
-			    TAILQ_EMPTY(&om->md.pv_list) &&
-			    ((om->flags & PG_FICTITIOUS) != 0 ||
-			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
-				vm_page_aflag_clear(om, PGA_WRITEABLE);
+#if defined(PAE) || defined(PAE_TABLES)
+			else if ((origpte & (PG_A | PG_NX)) == PG_A &&
+			    (newpte & PG_NX) != 0)
+				invlva = TRUE;
+#endif
 			if (invlva)
 				pmap_invalidate_page(pmap, va);
 		} else
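
For readers following the change, here is a minimal stand-alone C sketch of the ordering the new pmap_enter() code adopts when the physical address changes: atomically clear the old PTE (as pte_load_clear() does), transfer the hardware-maintained PG_A/PG_M bits to the old page while no other thread can still be setting them, invalidate the stale TLB entry, and only then install the new translation.  It is not FreeBSD code; pte_t, struct page, frame_to_page(), invalidate_page(), and replace_mapping() below are hypothetical simplifications of the real pmap structures.

/*
 * Illustrative sketch only -- not FreeBSD code.  It models the ordering
 * adopted by the diff above: clear the old PTE first, sample and
 * transfer its accessed/modified bits, shoot down the TLB entry if one
 * may exist, and only then publish the new translation.  The pte_t,
 * struct page, frame_to_page(), and invalidate_page() names are
 * hypothetical stand-ins for the kernel's real types and helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef _Atomic uint32_t pte_t;

#define	PG_V	0x001u		/* valid */
#define	PG_RW	0x002u		/* writable */
#define	PG_A	0x020u		/* accessed */
#define	PG_M	0x040u		/* modified (dirty) */
#define	PG_FRAME 0xfffff000u	/* physical frame mask */

struct page {
	bool dirty;
	bool referenced;
};

/* Hypothetical stand-ins for PHYS_TO_VM_PAGE() and pmap_invalidate_page(). */
static struct page pages[16];

static struct page *
frame_to_page(uint32_t frame)
{
	return (&pages[(frame >> 12) % 16]);
}

static void
invalidate_page(uintptr_t va)
{
	(void)va;	/* a real pmap would shoot down the TLB entry here */
}

/*
 * Replace the mapping at *pte with newpte.  Clearing the old PTE before
 * sampling PG_A/PG_M guarantees that no thread sharing the page table
 * can keep writing the old frame (or re-set those bits) after they have
 * been transferred to the old page's bookkeeping.
 */
static void
replace_mapping(pte_t *pte, uint32_t newpte, uintptr_t va)
{
	uint32_t origpte;
	struct page *om;

	origpte = atomic_exchange(pte, 0);	/* like pte_load_clear() */
	if (origpte & PG_V) {
		om = frame_to_page(origpte & PG_FRAME);
		if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			om->dirty = true;
		if (origpte & PG_A) {
			om->referenced = true;
			/* A TLB entry can exist only if the page was accessed. */
			invalidate_page(va);
		}
	}
	atomic_store(pte, newpte);	/* publish the new mapping */
}

int
main(void)
{
	pte_t pte;

	atomic_init(&pte, PG_V | PG_RW | PG_A | PG_M | 0x1000u);
	replace_mapping(&pte, PG_V | 0x2000u, 0x1000u);
	return (pages[1].dirty && pages[1].referenced ? 0 : 1);
}

Clearing before sampling is what makes the comment's COW argument hold: once the PTE is zero, every thread sharing the pmap sees a consistent (absent) mapping, so no dirty or accessed state can be lost between the old mapping's teardown and the new mapping's validation.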