diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -372,6 +372,8 @@
 void (*pmap_clean_stage2_tlbi)(void);
 void (*pmap_invalidate_vpipt_icache)(void);
+void (*pmap_stage2_invalidate_page)(uint64_t, vm_offset_t, bool);
+void (*pmap_stage2_invalidate_all)(uint64_t);
 
 /*
  * A pmap's cookie encodes an ASID and epoch number. Cookies for reserved
@@ -1478,6 +1480,14 @@
 	isb();
 }
 
+/*
+ * Invalidate the stage 2 TLB entry for the given IPA.  Calls through the
+ * hypervisor-installed pmap_stage2_invalidate_page hook, which may trap
+ * into EL2.
+ */
+static __inline void
+pmap_s2_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
+{
+	PMAP_ASSERT_STAGE2(pmap);
+	MPASS(pmap_stage2_invalidate_page != NULL);
+	pmap_stage2_invalidate_page(pmap_to_ttbr0(pmap), va, final_only);
+}
+
 /*
  * Invalidates any cached final- and optionally intermediate-level TLB entries
  * for the specified virtual address range in the given virtual address space.
@@ -1529,6 +1539,15 @@
 	isb();
 }
 
+/*
+ * Invalidate all stage 2 TLB entries for the given VMID.  May trap into EL2.
+ */
+static __inline void
+pmap_s2_invalidate_all(pmap_t pmap)
+{
+	PMAP_ASSERT_STAGE2(pmap);
+	MPASS(pmap_stage2_invalidate_all != NULL);
+	pmap_stage2_invalidate_all(pmap_to_ttbr0(pmap));
+}
+
+
 /*
  * Routine:	pmap_extract
  * Function:
@@ -1972,7 +1991,10 @@
 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
 		pmap_unwire_l3(pmap, va, l1pg, free);
 	}
-	pmap_invalidate_page(pmap, va, false);
+	if (pmap->pm_stage == PM_STAGE1)
+		pmap_invalidate_page(pmap, va, false);
+	else
+		pmap_s2_invalidate_page(pmap, va, false);
 
 	/*
 	 * Put page on a list so that it is released after
@@ -3273,7 +3295,9 @@
 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
 		if (!pmap_l3_valid(pmap_load(l3))) {
 			if (va != eva) {
-				pmap_invalidate_range(pmap, va, sva, true);
+				if (pmap->pm_stage == PM_STAGE1)
+					pmap_invalidate_range(pmap, va, sva,
+					    true);
 				va = eva;
 			}
 			continue;
@@ -3300,8 +3324,10 @@
 				 * still provides access to that page.
 				 */
 				if (va != eva) {
-					pmap_invalidate_range(pmap, va,
-					    sva, true);
+					if (pmap->pm_stage == PM_STAGE1)
+						pmap_invalidate_range(
+						    pmap, va, sva,
+						    true);
 					va = eva;
 				}
 				rw_wunlock(*lockp);
@@ -3330,8 +3356,15 @@
 		if (va == eva)
 			va = sva;
 	}
-	if (va != eva)
+	if (pmap->pm_stage == PM_STAGE1 && va != eva) {
 		pmap_invalidate_range(pmap, va, sva, true);
+	} else if (pmap->pm_stage == PM_STAGE2) {
+		/*
+		 * Invalidate all entries rather than as we remove them
+		 * as it may involve a call into EL2
+		 */
+		pmap_s2_invalidate_all(pmap);
+	}
 }
 
 /*
@@ -5475,7 +5508,10 @@
 	}
 	if (lock != NULL)
 		rw_wunlock(lock);
-	pmap_invalidate_all(pmap);
+	if (pmap->pm_stage == PM_STAGE1)
+		pmap_invalidate_all(pmap);
+	else
+		pmap_s2_invalidate_all(pmap);
 	free_pv_chunk_batch(free_chunks);
 	PMAP_UNLOCK(pmap);
 	vm_page_free_pages_toq(&free, true);
@@ -5800,7 +5836,10 @@
 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
 			    (tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, va, true);
+				if (pmap->pm_stage == PM_STAGE1)
+					pmap_invalidate_page(pmap, va, true);
+				else
+					pmap_s2_invalidate_page(pmap, va, true);
 				cleared++;
 			} else
 				not_cleared++;
@@ -5841,7 +5880,10 @@
 		if ((tpte & ATTR_AF) != 0) {
 			if ((tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, pv->pv_va, true);
+				if (pmap->pm_stage == PM_STAGE1)
+					pmap_invalidate_page(pmap, pv->pv_va,
+					    true);
+				else
+					pmap_s2_invalidate_page(pmap,
+					    pv->pv_va, true);
 				cleared++;
 			} else
 				not_cleared++;