diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -9304,12 +9304,16 @@
 					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
 					vm_page_dirty(m);
 				}
-				atomic_clear_long(pte, PG_M | PG_A);
-			} else if ((*pte & PG_A) != 0)
-				atomic_clear_long(pte, PG_A);
-			else
+			} else
 				goto maybe_invlrng;
 
+			/*
+			 * Avoid clearing the accessed bit for wired mappings.
+			 * Superpage demotion assumes that a wired mapping is
+			 * always accessed.
+			 */
+			atomic_clear_long(pte,
+			    PG_M | ((*pte & PG_W) == 0 ? PG_A : 0));
 			if ((*pte & PG_G) != 0) {
 				if (va == va_next)
 					va = sva;
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -7695,7 +7695,7 @@
 					 * subsequent access may act as a
 					 * repromotion trigger.
 					 */
-					if ((oldl3 & ATTR_SW_WIRED) == 0) {
+					if ((oldl3 & ATTR_SW_WIRED) == 0) {
 						dva = MIN((sva & ~L3C_OFFSET) +
 						    L3C_SIZE - PAGE_SIZE,
 						    va_next - PAGE_SIZE);
@@ -7721,12 +7721,17 @@
 				 * we avoid corrupting the page able.
 				 */
 				if (oldl3 != 0) {
-					while (!atomic_fcmpset_long(l3, &oldl3,
-					    (oldl3 & ~ATTR_AF) |
-					    ATTR_S1_AP(ATTR_S1_AP_RO)))
-						cpu_spinwait();
+					if ((oldl3 & ATTR_SW_WIRED) != 0)
+						pmap_set_bits(l3,
+						    ATTR_S1_AP(ATTR_S1_AP_RO));
+					else
+						while (!atomic_fcmpset_long(l3,
+						    &oldl3, (oldl3 & ~ATTR_AF) |
+						    ATTR_S1_AP(ATTR_S1_AP_RO)))
+							cpu_spinwait();
 				}
-			} else if ((oldl3 & ATTR_AF) != 0) {
+			} else if ((oldl3 & (ATTR_AF | ATTR_SW_WIRED)) ==
+			    ATTR_AF) {
 				/*
 				 * Clear the accessed bit in this L3 entry
 				 * regardless of the contiguous bit.
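
As a minimal standalone sketch of the logic above (stand-in constants and C11 atomics in place of the kernel's atomic_clear_long()/pmap_set_bits()/atomic_fcmpset_long(); none of this is pmap code): the amd64 hunk computes a clear mask that drops PG_A only for unwired mappings, and the arm64 hunk ORs in the read-only permission bit for wired entries so ATTR_AF survives, while unwired entries go through a compare-and-swap loop that clears ATTR_AF and sets read-only in one step.

/*
 * Standalone sketch, not kernel code.  AF, AP_RO, SW_WIRED, PG_A, PG_M,
 * and PG_W are illustrative stand-ins for the real flag definitions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	PG_A		0x1UL	/* accessed */
#define	PG_M		0x2UL	/* modified */
#define	PG_W		0x4UL	/* wired */
#define	AF		0x1UL	/* arm64-style accessed bit */
#define	AP_RO		0x2UL	/* arm64-style read-only permission */
#define	SW_WIRED	0x4UL	/* arm64-style software wired marker */

/* amd64-style mask: always clear PG_M, clear PG_A only if not wired. */
static uint64_t
advise_clear_mask(uint64_t pte)
{
	return (PG_M | ((pte & PG_W) == 0 ? PG_A : 0));
}

/* Wired arm64-style entry: OR in read-only, leaving AF untouched. */
static void
demote_wired(_Atomic uint64_t *l3e)
{
	atomic_fetch_or(l3e, AP_RO);
}

/* Unwired arm64-style entry: clear AF and set read-only in one CAS loop. */
static void
demote_unwired(_Atomic uint64_t *l3e)
{
	uint64_t old = atomic_load(l3e);

	while (!atomic_compare_exchange_weak(l3e, &old, (old & ~AF) | AP_RO))
		;	/* "old" now holds the latest value; retry */
}

int
main(void)
{
	_Atomic uint64_t wired = AF | SW_WIRED, unwired = AF;

	printf("amd64 mask, wired pte:   %#lx\n",
	    (unsigned long)advise_clear_mask(PG_A | PG_M | PG_W));
	printf("amd64 mask, unwired pte: %#lx\n",
	    (unsigned long)advise_clear_mask(PG_A | PG_M));

	demote_wired(&wired);		/* AF is preserved */
	demote_unwired(&unwired);	/* AF is cleared */
	printf("arm64 wired l3e:   %#lx\n", (unsigned long)atomic_load(&wired));
	printf("arm64 unwired l3e: %#lx\n",
	    (unsigned long)atomic_load(&unwired));
	return (0);
}

Running the sketch should print 0x2 and 0x3 for the two amd64 masks, and show the accessed bit surviving only in the wired arm64-style entry.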