diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -9649,6 +9649,8 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+	if (m->md.pat_mode == ma)
+		return;
 
 	m->md.pat_mode = ma;
 
@@ -9668,6 +9670,9 @@
 {
 	int error;
 
+	if (m->md.pat_mode == ma)
+		return;
+
 	m->md.pat_mode = ma;
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -5767,7 +5767,7 @@
 	CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m,
 	    VM_PAGE_TO_PHYS(m), oma, ma);
 
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if (ma == oma || (m->flags & PG_FICTITIOUS) != 0)
 		return;
 #if 0
 	/*
@@ -5784,22 +5784,20 @@
 	 * If page is not mapped by sf buffer, map the page
	 * transient and do invalidation.
 	 */
-	if (ma != oma) {
-		pa = VM_PAGE_TO_PHYS(m);
-		sched_pin();
-		pc = get_pcpu();
-		cmap2_pte2p = pc->pc_cmap2_pte2p;
-		mtx_lock(&pc->pc_cmap_lock);
-		if (pte2_load(cmap2_pte2p) != 0)
-			panic("%s: CMAP2 busy", __func__);
-		pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
-		    vm_memattr_to_pte2(ma)));
-		dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
-		pte2_clear(cmap2_pte2p);
-		tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
-		sched_unpin();
-		mtx_unlock(&pc->pc_cmap_lock);
-	}
+	pa = VM_PAGE_TO_PHYS(m);
+	sched_pin();
+	pc = get_pcpu();
+	cmap2_pte2p = pc->pc_cmap2_pte2p;
+	mtx_lock(&pc->pc_cmap_lock);
+	if (pte2_load(cmap2_pte2p) != 0)
+		panic("%s: CMAP2 busy", __func__);
+	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
+	    vm_memattr_to_pte2(ma)));
+	dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
+	pte2_clear(cmap2_pte2p);
+	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
+	sched_unpin();
+	mtx_unlock(&pc->pc_cmap_lock);
 }
 
 /*
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -8045,6 +8045,8 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+	if (m->md.pv_memattr == ma)
+		return;
 
 	m->md.pv_memattr = ma;
 
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -5617,6 +5617,8 @@
 static void
 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma)
 {
+	if (m->md.pat_mode == ma)
+		return;
 
 	m->md.pat_mode = ma;
 	if ((m->flags & PG_FICTITIOUS) != 0)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1469,6 +1469,9 @@
 	pmap_t	pmap;
 	u_int	lo;
 
+	if (m->md.mdpg_cache_attrs == ma)
+		return;
+
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		m->md.mdpg_cache_attrs = ma;
 		return;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -2134,6 +2134,9 @@
 	CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
 	    __func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);
 
+	if (m->md.mdpg_cache_attrs == ma)
+		return;
+
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		m->md.mdpg_cache_attrs = ma;
 		return;
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -5937,6 +5937,9 @@
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
+
+	if (m->md.mdpg_cache_attrs == ma)
+		return;
 	m->md.mdpg_cache_attrs = ma;
 
 	/*
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4838,6 +4838,8 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+	if (m->md.pv_memattr == ma)
+		return;
 
 	m->md.pv_memattr = ma;
 