diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8459,7 +8459,7 @@
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	rw_wlock(lock);
-retry_pv_loop:
+retry:
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		pmap = PV_PMAP(pv);
 		if (!PMAP_TRYLOCK(pmap)) {
@@ -8469,7 +8469,7 @@
 			rw_wlock(lock);
 			if (pvh_gen != pvh->pv_gen) {
 				PMAP_UNLOCK(pmap);
-				goto retry_pv_loop;
+				goto retry;
 			}
 		}
 		PG_RW = pmap_rw_bit(pmap);
@@ -8493,7 +8493,7 @@
 			if (pvh_gen != pvh->pv_gen ||
 			    md_gen != m->md.pv_gen) {
 				PMAP_UNLOCK(pmap);
-				goto retry_pv_loop;
+				goto retry;
 			}
 		}
 		PG_M = pmap_modified_bit(pmap);
@@ -8503,12 +8503,11 @@
 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
 		    m));
 		pte = pmap_pde_to_pte(pde, pv->pv_va);
-retry:
 		oldpte = *pte;
 		if (oldpte & PG_RW) {
-			if (!atomic_cmpset_long(pte, oldpte, oldpte &
+			while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
 			    ~(PG_RW | PG_M)))
-				goto retry;
+				cpu_spinwait();
 			if ((oldpte & PG_M) != 0)
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, pv->pv_va);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3223,10 +3223,12 @@
 	 * Return if the L2 entry already has the desired access restrictions
 	 * in place.
 	 */
-retry:
 	if ((old_l2 & mask) == nbits)
 		return;
 
+	while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
+		cpu_spinwait();
+
 	/*
	 * When a dirty read/write superpage mapping is write protected,
	 * update the dirty field of each of the superpage's constituent 4KB
@@ -3240,9 +3242,6 @@
 			vm_page_dirty(mt);
 	}
 
-	if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
-		goto retry;
-
 	/*
 	 * Since a promotion must break the 4KB page mappings before making
 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
@@ -3334,7 +3333,7 @@
 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
 		    sva += L3_SIZE) {
 			l3 = pmap_load(l3p);
-retry:
+
 			/*
 			 * Go to the next L3 entry if the current one is
 			 * invalid or already has the desired access
@@ -3351,6 +3350,10 @@
 				continue;
 			}
 
+			while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
+			    nbits))
+				cpu_spinwait();
+
 			/*
 			 * When a dirty read/write mapping is write protected,
 			 * update the page's dirty field.
@@ -3360,8 +3363,6 @@
 			    pmap_pte_dirty(pmap, l3))
 				vm_page_dirty(PHYS_TO_VM_PAGE(l3 &
 				    ~ATTR_MASK));
-			if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
-				goto retry;
 			if (va == va_next)
 				va = sva;
 		}
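
Aside (not part of the patch): both files apply the same transformation, replacing the explicit-reload "atomic_cmpset + goto retry" idiom with atomic_fcmpset, which on failure writes the currently observed value back into the caller's copy, so the retry reduces to a while loop around cpu_spinwait(). Below is a minimal standalone sketch of the two idioms using FreeBSD's atomic(9) primitives; the function and variable names (pte_clear_bits_cmpset, pte_clear_bits_fcmpset, word, clrbits) are hypothetical and serve only to illustrate the pattern.

#include <sys/types.h>
#include <machine/atomic.h>	/* atomic_cmpset_long(), atomic_fcmpset_long() */
#include <machine/cpu.h>	/* cpu_spinwait() */

/* Old idiom: reload the word and jump back after every failed compare-and-set. */
static void
pte_clear_bits_cmpset(volatile u_long *word, u_long clrbits)
{
	u_long old;

retry:
	old = *word;
	if (!atomic_cmpset_long(word, old, old & ~clrbits))
		goto retry;
}

/*
 * New idiom: atomic_fcmpset_long() updates "old" with the current value of
 * *word when the compare fails, so no explicit reload or label is needed;
 * the new value (old & ~clrbits) is recomputed on each loop iteration.
 */
static void
pte_clear_bits_fcmpset(volatile u_long *word, u_long clrbits)
{
	u_long old;

	old = *word;
	while (!atomic_fcmpset_long(word, &old, old & ~clrbits))
		cpu_spinwait();
}

Both versions clear the requested bits atomically; the fcmpset form avoids the extra load that the cmpset loop performs before each retry, and cpu_spinwait() (e.g. the PAUSE instruction on x86) is a hint to the CPU while spinning under contention.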