Index: head/sys/amd64/amd64/pmap.c
===================================================================
--- head/sys/amd64/amd64/pmap.c
+++ head/sys/amd64/amd64/pmap.c
@@ -7574,7 +7574,7 @@
 	pmap_t pmap;
 	pv_entry_t next_pv, pv;
 	pd_entry_t oldpde, *pde;
-	pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V;
+	pt_entry_t *pte, PG_M, PG_RW;
 	struct rwlock *lock;
 	vm_offset_t va;
 	int md_gen, pvh_gen;
@@ -7610,33 +7610,23 @@
 			}
 		}
 		PG_M = pmap_modified_bit(pmap);
-		PG_V = pmap_valid_bit(pmap);
 		PG_RW = pmap_rw_bit(pmap);
 		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		oldpde = *pde;
-		if ((oldpde & PG_RW) != 0) {
-			if (pmap_demote_pde_locked(pmap, pde, va, &lock)) {
-				if ((oldpde & PG_W) == 0) {
-					/*
-					 * Write protect the mapping to a
-					 * single page so that a subsequent
-					 * write access may repromote.
-					 */
-					va += VM_PAGE_TO_PHYS(m) - (oldpde &
-					    PG_PS_FRAME);
-					pte = pmap_pde_to_pte(pde, va);
-					oldpte = *pte;
-					if ((oldpte & PG_V) != 0) {
-						while (!atomic_cmpset_long(pte,
-						    oldpte,
-						    oldpte & ~(PG_M | PG_RW)))
-							oldpte = *pte;
-						vm_page_dirty(m);
-						pmap_invalidate_page(pmap, va);
-					}
-				}
-			}
+		/* If oldpde has PG_RW set, then it also has PG_M set. */
+		if ((oldpde & PG_RW) != 0 &&
+		    pmap_demote_pde_locked(pmap, pde, va, &lock) &&
+		    (oldpde & PG_W) == 0) {
+			/*
+			 * Write protect the mapping to a single page so that
+			 * a subsequent write access may repromote.
+			 */
+			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
+			pte = pmap_pde_to_pte(pde, va);
+			atomic_clear_long(pte, PG_M | PG_RW);
+			vm_page_dirty(m);
+			pmap_invalidate_page(pmap, va);
 		}
 		PMAP_UNLOCK(pmap);
 	}
Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -4889,28 +4889,22 @@
 		va = pv->pv_va;
 		l2 = pmap_l2(pmap, va);
 		oldl2 = pmap_load(l2);
-		if ((oldl2 & ATTR_SW_DBM) != 0) {
-			if (pmap_demote_l2_locked(pmap, l2, va, &lock)) {
-				if ((oldl2 & ATTR_SW_WIRED) == 0) {
-					/*
-					 * Write protect the mapping to a
-					 * single page so that a subsequent
-					 * write access may repromote.
-					 */
-					va += VM_PAGE_TO_PHYS(m) -
-					    (oldl2 & ~ATTR_MASK);
-					l3 = pmap_l2_to_l3(l2, va);
-					oldl3 = pmap_load(l3);
-					if (pmap_l3_valid(oldl3)) {
-						while (!atomic_fcmpset_long(l3,
-						    &oldl3, (oldl3 & ~ATTR_SW_DBM) |
-						    ATTR_AP(ATTR_AP_RO)))
-							cpu_spinwait();
-						vm_page_dirty(m);
-						pmap_invalidate_page(pmap, va);
-					}
-				}
-			}
+		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
+		if ((oldl2 & ATTR_SW_DBM) != 0 &&
+		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
+		    (oldl2 & ATTR_SW_WIRED) == 0) {
+			/*
+			 * Write protect the mapping to a single page so that
+			 * a subsequent write access may repromote.
+			 */
+			va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
+			l3 = pmap_l2_to_l3(l2, va);
+			oldl3 = pmap_load(l3);
+			while (!atomic_fcmpset_long(l3, &oldl3,
+			    (oldl3 & ~ATTR_SW_DBM) | ATTR_AP(ATTR_AP_RO)))
+				cpu_spinwait();
+			vm_page_dirty(m);
+			pmap_invalidate_page(pmap, va);
 		}
 		PMAP_UNLOCK(pmap);
 	}
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -5280,7 +5280,7 @@
 	pv_entry_t next_pv, pv;
 	pmap_t pmap;
 	pd_entry_t oldpde, *pde;
-	pt_entry_t oldpte, *pte;
+	pt_entry_t *pte;
 	vm_offset_t va;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -5307,34 +5307,24 @@
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, va);
 		oldpde = *pde;
-		if ((oldpde & PG_RW) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
-				if ((oldpde & PG_W) == 0) {
-					/*
-					 * Write protect the mapping to a
-					 * single page so that a subsequent
-					 * write access may repromote.
-					 */
-					va += VM_PAGE_TO_PHYS(m) - (oldpde &
-					    PG_PS_FRAME);
-					pte = pmap_pte_quick(pmap, va);
-					oldpte = *pte;
-					if ((oldpte & PG_V) != 0) {
-						/*
-						 * Regardless of whether a pte is 32 or 64 bits
-						 * in size, PG_RW and PG_M are among the least
-						 * significant 32 bits.
-						 */
-						while (!atomic_cmpset_int((u_int *)pte,
-						    oldpte,
-						    oldpte & ~(PG_M | PG_RW)))
-							oldpte = *pte;
-						vm_page_dirty(m);
-						pmap_invalidate_page_int(pmap,
-						    va);
-					}
-				}
-			}
+		/* If oldpde has PG_RW set, then it also has PG_M set. */
+		if ((oldpde & PG_RW) != 0 &&
+		    pmap_demote_pde(pmap, pde, va) &&
+		    (oldpde & PG_W) == 0) {
+			/*
+			 * Write protect the mapping to a single page so that
+			 * a subsequent write access may repromote.
+			 */
+			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
+			pte = pmap_pte_quick(pmap, va);
+			/*
+			 * Regardless of whether a pte is 32 or 64 bits
+			 * in size, PG_RW and PG_M are among the least
+			 * significant 32 bits.
+			 */
+			atomic_clear_int((u_int *)pte, PG_M | PG_RW);
+			vm_page_dirty(m);
+			pmap_invalidate_page_int(pmap, va);
 		}
 		PMAP_UNLOCK(pmap);
 	}
Index: head/sys/riscv/riscv/pmap.c
===================================================================
--- head/sys/riscv/riscv/pmap.c
+++ head/sys/riscv/riscv/pmap.c
@@ -4105,7 +4105,7 @@
 	pmap_t pmap;
 	pv_entry_t next_pv, pv;
 	pd_entry_t *l2, oldl2;
-	pt_entry_t *l3, oldl3;
+	pt_entry_t *l3;
 	vm_offset_t va;
 	int md_gen, pvh_gen;
 
@@ -4143,28 +4143,19 @@
 		va = pv->pv_va;
 		l2 = pmap_l2(pmap, va);
 		oldl2 = pmap_load(l2);
-		if ((oldl2 & PTE_W) != 0) {
-			if (pmap_demote_l2_locked(pmap, l2, va, &lock)) {
-				if ((oldl2 & PTE_SW_WIRED) == 0) {
-					/*
-					 * Write protect the mapping to a
-					 * single page so that a subsequent
-					 * write access may repromote.
-					 */
-					va += VM_PAGE_TO_PHYS(m) -
-					    PTE_TO_PHYS(oldl2);
-					l3 = pmap_l2_to_l3(l2, va);
-					oldl3 = pmap_load(l3);
-					if ((oldl3 & PTE_V) != 0) {
-						while (!atomic_fcmpset_long(l3,
-						    &oldl3, oldl3 & ~(PTE_D |
-						    PTE_W)))
-							cpu_spinwait();
-						vm_page_dirty(m);
-						pmap_invalidate_page(pmap, va);
-					}
-				}
-			}
+		/* If oldl2 has PTE_W set, then it also has PTE_D set. */
+		if ((oldl2 & PTE_W) != 0 &&
+		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
+		    (oldl2 & PTE_SW_WIRED) == 0) {
+			/*
+			 * Write protect the mapping to a single page so that
+			 * a subsequent write access may repromote.
+			 */
+			va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2);
+			l3 = pmap_l2_to_l3(l2, va);
+			pmap_clear_bits(l3, PTE_D | PTE_W);
+			vm_page_dirty(m);
+			pmap_invalidate_page(pmap, va);
 		}
 		PMAP_UNLOCK(pmap);
 	}
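Each architecture gets the same two simplifications: the three nested ifs collapse into one
short-circuiting condition, and the read-check-retry loop on the demoted page-table entry is
dropped, relying on the invariants the new comments state (a writable superpage mapping is
always dirty, and a successful demotion leaves the resulting small-page entries valid, so the
old PG_V / pmap_l3_valid() / PTE_V re-checks were redundant). Where the update is a pure
bit-clear (amd64, i386, riscv), the compare-and-swap loop becomes a single atomic clear. The
standalone sketch below illustrates that last transformation with C11 atomics; it is not
FreeBSD code, and the pte_t layout and the PG_V/PG_RW/PG_M values are hypothetical, invented
only for the example.

	/*
	 * Minimal sketch: when the bits being cleared do not depend on the
	 * old value, a compare-and-swap retry loop collapses into a single
	 * atomic AND.  The PTE layout and bit values are hypothetical.
	 */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef _Atomic uint64_t pte_t;

	#define	PG_V	0x001		/* hypothetical valid bit */
	#define	PG_RW	0x002		/* hypothetical writable bit */
	#define	PG_M	0x040		/* hypothetical modified bit */

	/* Old pattern: re-read the PTE and retry until the CAS succeeds. */
	static void
	clear_modify_cas(pte_t *pte)
	{
		uint64_t oldpte;

		oldpte = atomic_load(pte);
		while (!atomic_compare_exchange_weak(pte, &oldpte,
		    oldpte & ~(uint64_t)(PG_M | PG_RW)))
			;	/* a failed CAS reloads oldpte; just retry */
	}

	/* New pattern: the new value is old & ~bits, so one atomic AND suffices. */
	static void
	clear_modify_atomic_and(pte_t *pte)
	{
		atomic_fetch_and(pte, ~(uint64_t)(PG_M | PG_RW));
	}

	int
	main(void)
	{
		pte_t pte = 0x123000 | PG_V | PG_RW | PG_M;

		clear_modify_cas(&pte);
		atomic_store(&pte, 0x123000 | PG_V | PG_RW | PG_M);
		clear_modify_atomic_and(&pte);
		printf("pte after clear: %#jx\n", (uintmax_t)atomic_load(&pte));
		return (0);
	}

Note that the arm64 hunk keeps its fcmpset loop even after the restructuring: its update must
clear ATTR_SW_DBM and set ATTR_AP(ATTR_AP_RO) in one step, which is not expressible as a
single atomic AND. The riscv update is a pure clear of PTE_D and PTE_W, so it can use the
pmap_clear_bits() wrapper instead.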