Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -328,6 +328,7 @@
 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
+#define	pmap_store(table, entry)	atomic_store_64(table, entry)
 
 /********************/
 /* Inline functions */
@@ -637,7 +638,7 @@
 				    (vm_offset_t)l2);
 				freemempos += PAGE_SIZE;
 
-				pmap_load_store(&pagetable_dmap[l1_slot],
+				pmap_store(&pagetable_dmap[l1_slot],
 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 
 				memset(l2, 0, PAGE_SIZE);
@@ -655,7 +656,7 @@
 
 			l2_slot = pmap_l2_index(va);
 			KASSERT(l2_slot != 0, ("..."));
-			pmap_load_store(&l2[l2_slot],
+			pmap_store(&l2[l2_slot],
 			    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 			    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 		}
@@ -667,7 +668,7 @@
 		    (physmap[i + 1] - pa) >= L1_SIZE;
 		    pa += L1_SIZE, va += L1_SIZE) {
 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
-			pmap_load_store(&pagetable_dmap[l1_slot],
+			pmap_store(&pagetable_dmap[l1_slot],
 			    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 			    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
 		}
@@ -682,7 +683,7 @@
 				    (vm_offset_t)l2);
 				freemempos += PAGE_SIZE;
 
-				pmap_load_store(&pagetable_dmap[l1_slot],
+				pmap_store(&pagetable_dmap[l1_slot],
 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 
 				memset(l2, 0, PAGE_SIZE);
@@ -692,7 +693,7 @@
 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
 			    pa += L2_SIZE, va += L2_SIZE) {
 				l2_slot = pmap_l2_index(va);
-				pmap_load_store(&l2[l2_slot],
+				pmap_store(&l2[l2_slot],
 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 				    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 			}
@@ -727,7 +728,7 @@
 		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
 
 		pa = pmap_early_vtophys(l1pt, l2pt);
-		pmap_load_store(&l1[l1_slot],
+		pmap_store(&l1[l1_slot],
 		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
 		l2pt += PAGE_SIZE;
 	}
@@ -757,7 +758,7 @@
 		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
 
 		pa = pmap_early_vtophys(l1pt, l3pt);
-		pmap_load_store(&l2[l2_slot],
+		pmap_store(&l2[l2_slot],
 		    (pa & ~Ln_TABLE_MASK) | L2_TABLE);
 		l3pt += PAGE_SIZE;
 	}
@@ -1540,7 +1541,7 @@
 
 		l0index = ptepindex - (NUL2E + NUL1E);
 		l0 = &pmap->pm_l0[l0index];
-		pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
+		pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
 	} else if (ptepindex >= NUL2E) {
 		vm_pindex_t l0index, l1index;
 		pd_entry_t *l0, *l1;
@@ -1566,7 +1567,7 @@
 
 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
-		pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+		pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
 	} else {
 		vm_pindex_t l0index, l1index;
 		pd_entry_t *l0, *l1, *l2;
@@ -1608,7 +1609,7 @@
 
 		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
-		pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+		pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
 	}
 
 	pmap_resident_count_inc(pmap, 1);
@@ -1781,7 +1782,7 @@
 			if ((nkpg->flags & PG_ZERO) == 0)
 				pmap_zero_page(nkpg);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
-			pmap_load_store(l1, paddr | L1_TABLE);
+			pmap_store(l1, paddr | L1_TABLE);
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
@@ -3018,7 +3019,7 @@
 	pmap_invalidate_range_nopin(pmap, va, va + size);
 
 	/* Create the new mapping */
-	pmap_load_store(pte, newpte);
+	pmap_store(pte, newpte);
 	dsb(ishst);
 
 	critical_exit();
@@ -3304,12 +3305,9 @@
 			/*
 			 * No, might be a protection or wiring change.
 			 */
-			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
-				if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
-				    ATTR_AP(ATTR_AP_RW)) {
-					vm_page_aflag_set(m, PGA_WRITEABLE);
-				}
-			}
+			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
+			    (new_l3 & ATTR_SW_DBM) != 0)
+				vm_page_aflag_set(m, PGA_WRITEABLE);
 			goto validate;
 		}
 
@@ -3415,7 +3413,7 @@
 		}
 	} else {
 		/* New mapping */
-		pmap_load_store(l3, new_l3);
+		pmap_store(l3, new_l3);
 		dsb(ishst);
 	}
 
@@ -3517,12 +3515,16 @@
 		vm_page_free_pages_toq(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
 			/*
-			 * Both pmap_remove_l2() and pmap_remove_l3() will
-			 * leave the kernel page table page zero filled.
+			 * Both pmap_remove_l2() and pmap_remove_l3_range()
+			 * will leave the kernel page table page zero filled.
+			 * Nonetheless, the TLB could have an intermediate
+			 * entry for the kernel page table page.
			 */
 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
+			pmap_clear(l2);
+			pmap_invalidate_page(pmap, va);
 		} else
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -3536,10 +3538,13 @@
 			SLIST_INIT(&free);
 			if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
 				/*
-				 * Although "va" is not mapped, paging-structure
-				 * caches could nonetheless have entries that
+				 * Although "va" is not mapped, the TLB could
+				 * nonetheless have intermediate entries that
 				 * refer to the freed page table pages.
 				 * Invalidate those entries.
+				 *
+				 * XXX redundant invalidation (See
+				 * _pmap_unwire_l3().)
 				 */
 				pmap_invalidate_page(pmap, va);
 				vm_page_free_pages_toq(&free, true);
@@ -3564,7 +3569,7 @@
 	/*
 	 * Map the superpage.
 	 */
-	(void)pmap_load_store(l2, new_l2);
+	pmap_store(l2, new_l2);
 	dsb(ishst);
 
 	atomic_add_long(&pmap_l2_mappings, 1);
@@ -3767,7 +3772,7 @@
 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
 
-	pmap_load_store(l3, l3_val);
+	pmap_store(l3, l3_val);
 	dsb(ishst);
 
 	return (mpte);
@@ -3840,7 +3845,7 @@
 			 * demote the mapping and fall through.
 			 */
 			if (sva + L2_SIZE == va_next && eva >= va_next) {
-				atomic_clear_64(l2, ATTR_SW_WIRED);
+				pmap_clear_bits(l2, ATTR_SW_WIRED);
 				pmap->pm_stats.wired_count -= L2_SIZE /
 				    PAGE_SIZE;
 				continue;
@@ -3865,7 +3870,7 @@
 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
 			 * the System MMU may write to the entry concurrently.
 			 */
-			atomic_clear_64(l3, ATTR_SW_WIRED);
+			pmap_clear_bits(l3, ATTR_SW_WIRED);
 			pmap->pm_stats.wired_count--;
 		}
 	}
@@ -3944,8 +3949,7 @@
 				nbits = 0;
 				if ((srcptepaddr & ATTR_SW_DBM) != 0)
 					nbits |= ATTR_AP_RW_BIT;
-				(void)pmap_load_store(l2,
-				    (srcptepaddr & ~mask) | nbits);
+				pmap_store(l2, (srcptepaddr & ~mask) | nbits);
 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
 				    PAGE_SIZE);
 				atomic_add_long(&pmap_l2_mappings, 1);
@@ -3994,8 +3998,7 @@
 				nbits = 0;
 				if ((ptetemp & ATTR_SW_DBM) != 0)
 					nbits |= ATTR_AP_RW_BIT;
-				(void)pmap_load_store(dst_pte,
-				    (ptetemp & ~mask) | nbits);
+				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
 				pmap_resident_count_inc(dst_pmap, 1);
 			} else {
 				SLIST_INIT(&free);
@@ -4003,8 +4006,8 @@
 				    &free)) {
 					/*
 					 * Although "addr" is not mapped,
-					 * paging-structure caches could
-					 * nonetheless have entries that refer
+					 * the TLB could nonetheless have
+					 * intermediate entries that refer
 					 * to the freed page table pages.
 					 * Invalidate those entries.
 					 *
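
Note on the core of the change: pmap_load_store() expands to atomic_swap_64(), a full read-modify-write that returns the previous PTE, while the new pmap_store() expands to atomic_store_64() and simply publishes the new value. The hunks above switch to pmap_store() only where the old entry is not consumed, typically when a previously invalid entry is being written; callers that install user-visible mappings (pmap_enter(), pmap_enter_l2(), pmap_enter_quick_locked(), pmap_update_entry()) still follow the store with dsb(ishst). The sketch below is purely illustrative and not part of the patch: it models the two macros in user space with C11 atomics standing in for the kernel's atomic_swap_64()/atomic_store_64(); the pt_entry typedef, the pte_load_store()/pte_store() helpers, and the main() driver are invented for the example.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef _Atomic uint64_t pt_entry;

    /* Analogous to pmap_load_store(): swap in a new entry, return the old one. */
    static uint64_t
    pte_load_store(pt_entry *pte, uint64_t newval)
    {
            return (atomic_exchange(pte, newval));
    }

    /* Analogous to pmap_store(): publish a new entry; the old value is unused. */
    static void
    pte_store(pt_entry *pte, uint64_t newval)
    {
            atomic_store(pte, newval);
    }

    int
    main(void)
    {
            pt_entry pte = 0;
            uint64_t old;

            /* A brand-new mapping: nothing to read back, a plain store suffices. */
            pte_store(&pte, 0x1000 | 0x3);

            /* Replacing a live mapping where the old PTE still matters. */
            old = pte_load_store(&pte, 0x2000 | 0x3);
            printf("old PTE %#jx, new PTE %#jx\n", (uintmax_t)old,
                (uintmax_t)atomic_load(&pte));
            return (0);
    }

The substitution of pmap_clear_bits() for the bare atomic_clear_64() calls in pmap_unwire() follows the same pattern, presumably so that every PTE update goes through one of the pmap_*() accessor macros.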