Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1222,11 +1222,35 @@
 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
     &pmap_l2_promotions, 0, "2MB page promotions");
 
+/*
+ * If the given value for "final_only" is false, then any cached intermediate-
+ * level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to
+ * any cached final-level entry, i.e., either an L{1,2}_BLOCK or L3_PAGE entry.
+ * Otherwise, just the cached final-level entry is invalidated.
+ */
+static __inline void
+pmap_invalidate_kernel(uint64_t r, bool final_only)
+{
+	if (final_only)
+		__asm __volatile("tlbi vaale1is, %0" : : "r" (r));
+	else
+		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
+}
+
+static __inline void
+pmap_invalidate_user(uint64_t r, bool final_only)
+{
+	if (final_only)
+		__asm __volatile("tlbi vale1is, %0" : : "r" (r));
+	else
+		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
+}
+
 /*
  * Invalidate a single TLB entry.
  */
 static __inline void
-pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
 {
 	uint64_t r;
 
@@ -1235,17 +1259,18 @@
 	dsb(ishst);
 	if (pmap == kernel_pmap) {
 		r = atop(va);
-		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
+		pmap_invalidate_kernel(r, final_only);
 	} else {
 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
-		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
+		pmap_invalidate_user(r, final_only);
 	}
 	dsb(ish);
 	isb();
 }
 
 static __inline void
-pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+    bool final_only)
 {
 	uint64_t end, r, start;
 
@@ -1256,13 +1281,13 @@
 		start = atop(sva);
 		end = atop(eva);
 		for (r = start; r < end; r++)
-			__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
+			pmap_invalidate_kernel(r, final_only);
 	} else {
 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
 		start |= atop(sva);
 		end |= atop(eva);
 		for (r = start; r < end; r++)
-			__asm __volatile("tlbi vae1is, %0" : : "r" (r));
+			pmap_invalidate_user(r, final_only);
 	}
 	dsb(ish);
 	isb();
@@ -1513,7 +1538,7 @@
 		pa += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va);
+	pmap_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 void
@@ -1533,7 +1558,7 @@
 
 	pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
 	pmap_clear(pte);
-	pmap_invalidate_page(kernel_pmap, va);
+	pmap_invalidate_page(kernel_pmap, va, true);
 }
 
 void
@@ -1555,7 +1580,7 @@
 		va += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va);
+	pmap_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /*
@@ -1611,7 +1636,7 @@
 
 		va += L3_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va);
+	pmap_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /*
@@ -1637,7 +1662,7 @@
 
 		va += PAGE_SIZE;
 	}
-	pmap_invalidate_range(kernel_pmap, sva, va);
+	pmap_invalidate_range(kernel_pmap, sva, va, true);
 }
 
 /***************************************************
@@ -1725,7 +1750,7 @@
 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
 		pmap_unwire_l3(pmap, va, l1pg, free);
 	}
-	pmap_invalidate_page(pmap, va);
+	pmap_invalidate_page(pmap, va, false);
 
 	/*
 	 * Put page on a list so that it is released after
@@ -1763,17 +1788,8 @@
 	struct spglist free;
 
 	SLIST_INIT(&free);
-	if (pmap_unwire_l3(pmap, va, mpte, &free)) {
-		/*
-		 * Although "va" was never mapped, the TLB could nonetheless
-		 * have intermediate entries that refer to the freed page
-		 * table pages.  Invalidate those entries.
-		 *
-		 * XXX redundant invalidation (See _pmap_unwire_l3().)
-		 */
-		pmap_invalidate_page(pmap, va);
+	if (pmap_unwire_l3(pmap, va, mpte, &free))
 		vm_page_free_pages_toq(&free, true);
-	}
 }
 
 void
@@ -2417,7 +2433,7 @@
 			if (pmap_pte_dirty(pmap, tpte))
 				vm_page_dirty(m);
 			if ((tpte & ATTR_AF) != 0) {
-				pmap_invalidate_page(pmap, va);
+				pmap_invalidate_page(pmap, va, true);
 				vm_page_aflag_set(m, PGA_REFERENCED);
 			}
 			CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
@@ -2898,7 +2914,7 @@
 	 * Since a promotion must break the 4KB page mappings before making
 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
 	 */
-	pmap_invalidate_page(pmap, sva);
+	pmap_invalidate_page(pmap, sva, true);
 
 	if (old_l2 & ATTR_SW_WIRED)
 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
@@ -2948,7 +2964,7 @@
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	old_l3 = pmap_load_clear(l3);
-	pmap_invalidate_page(pmap, va);
+	pmap_invalidate_page(pmap, va, true);
 	if (old_l3 & ATTR_SW_WIRED)
 		pmap->pm_stats.wired_count -= 1;
 	pmap_resident_count_dec(pmap, 1);
@@ -2997,7 +3013,7 @@
 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
 		if (!pmap_l3_valid(pmap_load(l3))) {
 			if (va != eva) {
-				pmap_invalidate_range(pmap, va, sva);
+				pmap_invalidate_range(pmap, va, sva, true);
 				va = eva;
 			}
 			continue;
@@ -3025,7 +3041,7 @@
 					 */
 					if (va != eva) {
 						pmap_invalidate_range(pmap, va,
-						    sva);
+						    sva, true);
 						va = eva;
 					}
 					rw_wunlock(*lockp);
@@ -3049,7 +3065,7 @@
 		}
 	}
 	if (va != eva)
-		pmap_invalidate_range(pmap, va, sva);
+		pmap_invalidate_range(pmap, va, sva, true);
 }
 
 /*
@@ -3104,7 +3120,7 @@
 			MPASS(pmap != kernel_pmap);
 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
 			pmap_clear(l1);
-			pmap_invalidate_page(pmap, sva);
+			pmap_invalidate_page(pmap, sva, true);
 			pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
 			pmap_unuse_pt(pmap, sva, pmap_load(l0), &free);
 			continue;
@@ -3235,7 +3251,7 @@
 		if (tpte & ATTR_SW_WIRED)
 			pmap->pm_stats.wired_count--;
 		if ((tpte & ATTR_AF) != 0) {
-			pmap_invalidate_page(pmap, pv->pv_va);
+			pmap_invalidate_page(pmap, pv->pv_va, true);
 			vm_page_aflag_set(m, PGA_REFERENCED);
 		}
 
@@ -3300,7 +3316,7 @@
 	 * Since a promotion must break the 4KB page mappings before making
 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
 	 */
-	pmap_invalidate_page(pmap, sva);
+	pmap_invalidate_page(pmap, sva, true);
 }
 
 /*
@@ -3357,7 +3373,7 @@
 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
 			if ((pmap_load(l1) & mask) != nbits) {
 				pmap_store(l1, (pmap_load(l1) & ~mask) | nbits);
-				pmap_invalidate_page(pmap, sva);
+				pmap_invalidate_page(pmap, sva, true);
 			}
 			continue;
 		}
@@ -3398,7 +3414,8 @@
 			 */
 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
 				if (va != va_next) {
-					pmap_invalidate_range(pmap, va, sva);
+					pmap_invalidate_range(pmap, va, sva,
+					    true);
 					va = va_next;
 				}
 				continue;
@@ -3421,7 +3438,7 @@
 				va = sva;
 		}
 		if (va != va_next)
-			pmap_invalidate_range(pmap, va, sva);
+			pmap_invalidate_range(pmap, va, sva, true);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -3483,7 +3500,13 @@
 	 * lookup the physical address.
 	 */
 	pmap_clear_bits(pte, ATTR_DESCR_VALID);
-	pmap_invalidate_range(pmap, va, va + size);
+
+	/*
+	 * When promoting, the L{1,2}_TABLE entry that is being replaced might
+	 * be cached, so we invalidate intermediate entries as well as final
+	 * entries.
+	 */
+	pmap_invalidate_range(pmap, va, va + size, false);
 
 	/* Create the new mapping */
 	pmap_store(pte, newpte);
@@ -3937,7 +3960,7 @@
 			if (pmap_pte_dirty(pmap, orig_l3))
 				vm_page_dirty(om);
 			if ((orig_l3 & ATTR_AF) != 0) {
-				pmap_invalidate_page(pmap, va);
+				pmap_invalidate_page(pmap, va, true);
 				vm_page_aflag_set(om, PGA_REFERENCED);
 			}
 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
@@ -3952,7 +3975,7 @@
 		} else {
 			KASSERT((orig_l3 & ATTR_AF) != 0,
 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
-			pmap_invalidate_page(pmap, va);
+			pmap_invalidate_page(pmap, va, true);
 		}
 		orig_l3 = 0;
 	} else {
@@ -4010,7 +4033,7 @@
 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
 			/* same PA, different attributes */
 			orig_l3 = pmap_load_store(l3, new_l3);
-			pmap_invalidate_page(pmap, va);
+			pmap_invalidate_page(pmap, va, true);
 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
 			    pmap_pte_dirty(pmap, orig_l3))
 				vm_page_dirty(m);
@@ -4179,13 +4202,15 @@
 			 * Both pmap_remove_l2() and pmap_remove_l3_range()
 			 * will leave the kernel page table page zero filled.
 			 * Nonetheless, the TLB could have an intermediate
-			 * entry for the kernel page table page.
+			 * entry for the kernel page table page, so request
+			 * an invalidation at all levels after clearing
+			 * the L2_TABLE entry.
 			 */
 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 			if (pmap_insert_pt_page(pmap, mt, false))
 				panic("pmap_enter_l2: trie insert failed");
 			pmap_clear(l2);
-			pmap_invalidate_page(pmap, va);
+			pmap_invalidate_page(pmap, va, false);
 		}
 	}
 
@@ -5358,7 +5383,7 @@
 			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
 			    ATTR_S1_AP(ATTR_S1_AP_RW))
 				vm_page_dirty(m);
-			pmap_invalidate_page(pmap, pv->pv_va);
+			pmap_invalidate_page(pmap, pv->pv_va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -5456,7 +5481,7 @@
 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
 			    (tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, va);
+				pmap_invalidate_page(pmap, va, true);
 				cleared++;
 			} else
 				not_cleared++;
@@ -5497,7 +5522,7 @@
 		if ((tpte & ATTR_AF) != 0) {
 			if ((tpte & ATTR_SW_WIRED) == 0) {
 				pmap_clear_bits(pte, ATTR_AF);
-				pmap_invalidate_page(pmap, pv->pv_va);
+				pmap_invalidate_page(pmap, pv->pv_va, true);
 				cleared++;
 			} else
 				not_cleared++;
@@ -5640,12 +5665,12 @@
 			continue;
maybe_invlrng:
 			if (va != va_next) {
-				pmap_invalidate_range(pmap, va, sva);
+				pmap_invalidate_range(pmap, va, sva, true);
 				va = va_next;
 			}
 		}
 		if (va != va_next)
-			pmap_invalidate_range(pmap, va, sva);
+			pmap_invalidate_range(pmap, va, sva, true);
 	}
 	PMAP_UNLOCK(pmap);
 }
@@ -5706,7 +5731,7 @@
 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
 				cpu_spinwait();
 			vm_page_dirty(m);
-			pmap_invalidate_page(pmap, va);
+			pmap_invalidate_page(pmap, va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -5729,7 +5754,7 @@
 		oldl3 = pmap_load(l3);
 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
-			pmap_invalidate_page(pmap, pv->pv_va);
+			pmap_invalidate_page(pmap, pv->pv_va, true);
 		}
 		PMAP_UNLOCK(pmap);
 	}
@@ -6825,7 +6850,7 @@
 			if ((pte & ATTR_S1_AP_RW_BIT) ==
 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
-				pmap_invalidate_page(pmap, far);
+				pmap_invalidate_page(pmap, far, true);
 			}
 			rv = KERN_SUCCESS;
 		}
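
As a quick aid for reviewing the new "final_only" parameter, the following userland sketch (not part of the patch; the enum and the main() harness are invented for illustration) mirrors the TLBI-operation selection that pmap_invalidate_kernel() and pmap_invalidate_user() now perform: the "aa" forms match all ASIDs and are used for the global kernel mappings, the plain forms match the ASID encoded in the operand and are used for user mappings, and the "l" (last-level) forms leave cached intermediate, i.e. table, entries alone.

/*
 * Standalone sketch of the TLBI-operation selection added by this patch.
 * The enum and harness below are illustrative only; they do not exist in
 * sys/arm64/arm64/pmap.c.
 */
#include <stdbool.h>
#include <stdio.h>

enum pmap_kind { PMAP_KERNEL, PMAP_USER };

static const char *
tlbi_op(enum pmap_kind kind, bool final_only)
{
	if (kind == PMAP_KERNEL)
		return (final_only ? "tlbi vaale1is" : "tlbi vaae1is");
	else
		return (final_only ? "tlbi vale1is" : "tlbi vae1is");
}

int
main(void)
{
	/*
	 * final_only = true:  only a final-level (L{1,2}_BLOCK or L3_PAGE)
	 *                     entry changed, e.g. pmap_kremove() or the
	 *                     protection/accessed-bit updates above.
	 * final_only = false: an L{0,1,2}_TABLE entry changed or a page
	 *                     table page was freed, e.g. _pmap_unwire_l3()
	 *                     or pmap_enter_l2() reclaiming an L3 table.
	 */
	printf("kernel, leaf change:  %s\n", tlbi_op(PMAP_KERNEL, true));
	printf("kernel, table change: %s\n", tlbi_op(PMAP_KERNEL, false));
	printf("user, leaf change:    %s\n", tlbi_op(PMAP_USER, true));
	printf("user, table change:   %s\n", tlbi_op(PMAP_USER, false));
	return (0);
}

Compiling and running this prints which TLBI operation each combination would issue, which is a convenient way to cross-check the call sites above that pass true (leaf-only changes) against those that pass false (page table pages freed or table entries replaced).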