diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1445,7 +1445,7 @@
  * Otherwise, just the cached final-level entry is invalidated.
  */
 static __inline void
-pmap_invalidate_kernel(uint64_t r, bool final_only)
+pmap_s1_invalidate_kernel(uint64_t r, bool final_only)
 {
 	if (final_only)
 		__asm __volatile("tlbi vaale1is, %0" : : "r" (r));
@@ -1454,7 +1454,7 @@
 }
 
 static __inline void
-pmap_invalidate_user(uint64_t r, bool final_only)
+pmap_s1_invalidate_user(uint64_t r, bool final_only)
 {
 	if (final_only)
 		__asm __volatile("tlbi vale1is, %0" : : "r" (r));
@@ -1467,7 +1467,7 @@
  * for the specified virtual address in the given virtual address space.
  */
 static __inline void
-pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
+pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
 {
 	uint64_t r;
 
@@ -1476,10 +1476,10 @@
 	dsb(ishst);
 	r = TLBI_VA(va);
 	if (pmap == kernel_pmap) {
-		pmap_invalidate_kernel(r, final_only);
+		pmap_s1_invalidate_kernel(r, final_only);
 	} else {
 		r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
-		pmap_invalidate_user(r, final_only);
+		pmap_s1_invalidate_user(r, final_only);
 	}
 	dsb(ish);
 	isb();
@@ -1490,7 +1490,7 @@
  * for the specified virtual address range in the given virtual address space.
  */
 static __inline void
-pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
     bool final_only)
 {
 	uint64_t end, r, start;
@@ -1502,13 +1502,13 @@
 		start = TLBI_VA(sva);
 		end = TLBI_VA(eva);
 		for (r = start; r < end; r += TLBI_VA_L3_INCR)
-			pmap_invalidate_kernel(r, final_only);
+			pmap_s1_invalidate_kernel(r, final_only);
 	} else {
 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
 		start |= TLBI_VA(sva);
 		end |= TLBI_VA(eva);
 		for (r = start; r < end; r += TLBI_VA_L3_INCR)
-			pmap_invalidate_user(r, final_only);
+			pmap_s1_invalidate_user(r, final_only);
 	}
 	dsb(ish);
 	isb();
@@ -1519,7 +1519,7 @@
  * given virtual address space.
  */
 static __inline void
-pmap_invalidate_all(pmap_t pmap)
+pmap_s1_invalidate_all(pmap_t pmap)
 {
 	uint64_t r;
 
@@ -1770,7 +1770,7 @@
 		pa += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va, true);
+	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 void
@@ -1790,7 +1790,7 @@
 
 	pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
 	pmap_clear(pte);
-	pmap_invalidate_page(kernel_pmap, va, true);
+	pmap_s1_invalidate_page(kernel_pmap, va, true);
 }
 
 void
@@ -1812,7 +1812,7 @@
 		va += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va, true);
+	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /*
@@ -1868,7 +1868,7 @@
 		va += L3_SIZE;
 	}
 
-	pmap_invalidate_range(kernel_pmap, sva, va, true);
+	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /*
@@ -1894,7 +1894,7 @@
 		va += PAGE_SIZE;
 	}
 
-	pmap_invalidate_range(kernel_pmap, sva, va, true);
+	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /***************************************************
@@ -1982,7 +1982,7 @@
 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
 		pmap_unwire_l3(pmap, va, l1pg, free);
 	}
-	pmap_invalidate_page(pmap, va, false);
+	pmap_s1_invalidate_page(pmap, va, false);
 
 	/*
 	 * Put page on a list so that it is released after
@@ -2652,7 +2652,7 @@
 			if (pmap_pte_dirty(pmap, tpte))
 				vm_page_dirty(m);
 			if ((tpte & ATTR_AF) != 0) {
-				pmap_invalidate_page(pmap, va, true);
+				pmap_s1_invalidate_page(pmap, va, true);
 				vm_page_aflag_set(m, PGA_REFERENCED);
 			}
 			CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
@@ -3182,9 +3182,9 @@
 
 	/*
 	 * Since a promotion must break the 4KB page mappings before making
-	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
+	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
 	 */
-	pmap_invalidate_page(pmap, sva, true);
+	pmap_s1_invalidate_page(pmap, sva, true);
 
 	if (old_l2 & ATTR_SW_WIRED)
 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
@@ -3234,7 +3234,7 @@
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	old_l3 = pmap_load_clear(l3);
-	pmap_invalidate_page(pmap, va, true);
+	pmap_s1_invalidate_page(pmap, va, true);
 	if (old_l3 & ATTR_SW_WIRED)
 		pmap->pm_stats.wired_count -= 1;
 	pmap_resident_count_dec(pmap, 1);
@@ -3283,7 +3283,7 @@
 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
 		if (!pmap_l3_valid(pmap_load(l3))) {
 			if (va != eva) {
-				pmap_invalidate_range(pmap, va, sva, true);
+				pmap_s1_invalidate_range(pmap, va, sva, true);
 				va = eva;
 			}
 			continue;
@@ -3310,7 +3310,7 @@
 			 * still provides access to that page.
 			 */
 			if (va != eva) {
-				pmap_invalidate_range(pmap, va,
+				pmap_s1_invalidate_range(pmap, va,
 				    sva, true);
 				va = eva;
 			}
@@ -3341,7 +3341,7 @@
 			va = sva;
 	}
 	if (va != eva)
-		pmap_invalidate_range(pmap, va, sva, true);
+		pmap_s1_invalidate_range(pmap, va, sva, true);
 }
 
 /*
@@ -3397,7 +3397,7 @@
 			MPASS(pmap != kernel_pmap);
 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
 			pmap_clear(l1);
-			pmap_invalidate_page(pmap, sva, true);
+			pmap_s1_invalidate_page(pmap, sva, true);
 			pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
 			pmap_unuse_pt(pmap, sva, pmap_load(l0), &free);
 			continue;
@@ -3528,7 +3528,7 @@
 		if (tpte & ATTR_SW_WIRED)
 			pmap->pm_stats.wired_count--;
 		if ((tpte & ATTR_AF) != 0) {
-			pmap_invalidate_page(pmap, pv->pv_va, true);
+			pmap_s1_invalidate_page(pmap, pv->pv_va, true);
 			vm_page_aflag_set(m, PGA_REFERENCED);
 		}
 
@@ -3591,9 +3591,9 @@
 
 	/*
 	 * Since a promotion must break the 4KB page mappings before making
-	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
+	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
 	 */
-	pmap_invalidate_page(pmap, sva, true);
+	pmap_s1_invalidate_page(pmap, sva, true);
 }
 
 /*
@@ -3634,7 +3634,7 @@
 			if ((pmap_load(l1) & mask) != nbits) {
 				pmap_store(l1, (pmap_load(l1) & ~mask) | nbits);
 				if (invalidate)
-					pmap_invalidate_page(pmap, sva, true);
+					pmap_s1_invalidate_page(pmap, sva, true);
 			}
 			continue;
 		}
@@ -3676,7 +3676,7 @@
 		if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
 			if (va != va_next) {
 				if (invalidate)
-					pmap_invalidate_range(pmap,
+					pmap_s1_invalidate_range(pmap,
 					    va, sva, true);
 				va = va_next;
 			}
@@ -3700,7 +3700,7 @@
 				va = sva;
 		}
 		if (va != va_next && invalidate)
-			pmap_invalidate_range(pmap, va, sva, true);
+			pmap_s1_invalidate_range(pmap, va, sva, true);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -3813,7 +3813,7 @@
 	 * be cached, so we invalidate intermediate entries as well as final
	 * entries.
 	 */
-	pmap_invalidate_range(pmap, va, va + size, false);
+	pmap_s1_invalidate_range(pmap, va, va + size, false);
 
 	/* Create the new mapping */
 	pmap_store(pte, newpte);
@@ -4269,7 +4269,7 @@
 			if (pmap_pte_dirty(pmap, orig_l3))
 				vm_page_dirty(om);
 			if ((orig_l3 & ATTR_AF) != 0) {
-				pmap_invalidate_page(pmap, va, true);
+				pmap_s1_invalidate_page(pmap, va, true);
 				vm_page_aflag_set(om, PGA_REFERENCED);
 			}
 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
@@ -4284,7 +4284,7 @@
 		} else {
 			KASSERT((orig_l3 & ATTR_AF) != 0,
 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
-			pmap_invalidate_page(pmap, va, true);
+			pmap_s1_invalidate_page(pmap, va, true);
 		}
 		orig_l3 = 0;
 	} else {
@@ -4342,7 +4342,7 @@
 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
 			/* same PA, different attributes */
 			orig_l3 = pmap_load_store(l3, new_l3);
-			pmap_invalidate_page(pmap, va, true);
+			pmap_s1_invalidate_page(pmap, va, true);
 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
 			    pmap_pte_dirty(pmap, orig_l3))
 				vm_page_dirty(m);
@@ -4528,7 +4528,7 @@
 			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
 			pmap_clear(l2);
-			pmap_invalidate_page(pmap, va, false);
+			pmap_s1_invalidate_page(pmap, va, false);
 		}
 	}
 
@@ -5485,7 +5485,7 @@
 	}
 	if (lock != NULL)
 		rw_wunlock(lock);
-	pmap_invalidate_all(pmap);
+	pmap_s1_invalidate_all(pmap);
 	free_pv_chunk_batch(free_chunks);
 	PMAP_UNLOCK(pmap);
 	vm_page_free_pages_toq(&free, true);
@@ -5712,7 +5712,7 @@
 			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
 			    ATTR_S1_AP(ATTR_S1_AP_RW))
 				vm_page_dirty(m);
-			pmap_invalidate_page(pmap, pv->pv_va, true);
+			pmap_s1_invalidate_page(pmap, pv->pv_va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -5810,7 +5810,7 @@
 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
 			    (tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, va, true);
+				pmap_s1_invalidate_page(pmap, va, true);
 				cleared++;
 			} else
 				not_cleared++;
@@ -5851,7 +5851,7 @@
 		if ((tpte & ATTR_AF) != 0) {
 			if ((tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, pv->pv_va, true);
+				pmap_s1_invalidate_page(pmap, pv->pv_va, true);
 				cleared++;
 			} else
 				not_cleared++;
@@ -5991,12 +5991,12 @@
 			continue;
maybe_invlrng:
 			if (va != va_next) {
-				pmap_invalidate_range(pmap, va, sva, true);
+				pmap_s1_invalidate_range(pmap, va, sva, true);
 				va = va_next;
 			}
 		}
 		if (va != va_next)
-			pmap_invalidate_range(pmap, va, sva, true);
+			pmap_s1_invalidate_range(pmap, va, sva, true);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -6057,7 +6057,7 @@
 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
 				cpu_spinwait();
 			vm_page_dirty(m);
-			pmap_invalidate_page(pmap, va, true);
+			pmap_s1_invalidate_page(pmap, va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -6080,7 +6080,7 @@
 		oldl3 = pmap_load(l3);
 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
-			pmap_invalidate_page(pmap, pv->pv_va, true);
+			pmap_s1_invalidate_page(pmap, pv->pv_va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -6169,7 +6169,7 @@
 			va += L2_SIZE;
 			pa += L2_SIZE;
 		}
-		pmap_invalidate_all(kernel_pmap);
+		pmap_s1_invalidate_all(kernel_pmap);
 
 		va = preinit_map_va + (start_idx * L2_SIZE);
 
@@ -6238,7 +6238,7 @@
 		}
 	}
 	if (preinit_map) {
-		pmap_invalidate_all(kernel_pmap);
+		pmap_s1_invalidate_all(kernel_pmap);
 		return;
 	}
 
@@ -7244,7 +7244,7 @@
 		if ((pte & ATTR_S1_AP_RW_BIT) ==
 		    ATTR_S1_AP(ATTR_S1_AP_RO)) {
 			pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
-			pmap_invalidate_page(pmap, far, true);
+			pmap_s1_invalidate_page(pmap, far, true);
 		}
 		rv = KERN_SUCCESS;
 	}
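
Note for reviewers tracing the rename: the change is mechanical, marking these
helpers as specific to the stage 1 (EL1&0) translation regime, presumably so
that the plain pmap_invalidate_* names remain available for stage 2 variants.
All of the renamed helpers share the barrier discipline visible in the
pmap_s1_invalidate_page() hunk above. The freestanding C sketch below restates
that sequence for illustration only; it is not part of the patch. The
TLBI_VA()/ASID_TO_OPERAND() encodings and the s1_invalidate_page() name are
simplified stand-ins for the pmap.c macros, written out with explicit inline
asm instead of the kernel's dsb()/isb() wrappers.

/*
 * Sketch of the stage 1 TLB invalidation sequence used by the
 * pmap_s1_invalidate_* helpers: DSB ISHST, TLBI, DSB ISH, ISB.
 * Compile for aarch64.
 */
#include <stdbool.h>
#include <stdint.h>

/* VA[55:12] occupies TLBI operand bits [43:0]; the ASID sits in [63:48]. */
#define	TLBI_VA(va)		(((uint64_t)(va) >> 12) & 0xfffffffffffUL)
#define	ASID_TO_OPERAND(asid)	((uint64_t)(asid) << 48)

static inline void
s1_invalidate_page(uint16_t asid, uint64_t va, bool kernel, bool final_only)
{
	uint64_t r;

	/* Make prior PTE stores visible before issuing the invalidate. */
	__asm__ __volatile__("dsb ishst" ::: "memory");

	r = TLBI_VA(va);
	if (kernel) {
		/* Kernel mappings are global: invalidate across all ASIDs. */
		if (final_only)
			__asm__ __volatile__("tlbi vaale1is, %0" : : "r" (r));
		else
			__asm__ __volatile__("tlbi vaae1is, %0" : : "r" (r));
	} else {
		/* User mappings are ASID-tagged: invalidate one ASID only. */
		r |= ASID_TO_OPERAND(asid);
		if (final_only)
			__asm__ __volatile__("tlbi vale1is, %0" : : "r" (r));
		else
			__asm__ __volatile__("tlbi vae1is, %0" : : "r" (r));
	}

	/* Wait for the broadcast invalidate, then resynchronize the core. */
	__asm__ __volatile__("dsb ish" ::: "memory");
	__asm__ __volatile__("isb" ::: "memory");
}

The final_only flag selects the "last level" TLBI variants (vaale1is/vale1is),
which drop only the cached final-level entry; passing false also invalidates
cached intermediate walk entries, as the comment above the first hunk and the
pmap_update_entry() call site (va, va + size, false) both rely on.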