Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -153,6 +153,8 @@
 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
 
+#define	PMAP_ASSERT_ALIVE(pmap)		MPASS((pmap)->pm_dead == FALSE)
+
 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
@@ -1177,6 +1179,9 @@
 {
 	uint64_t r;
 
+	if (pmap->pm_dead == TRUE)
+		return;
+
 	PMAP_ASSERT_STAGE1(pmap);
 
 	dsb(ishst);
@@ -1196,6 +1201,9 @@
 {
 	uint64_t end, r, start;
 
+	if (pmap->pm_dead == TRUE)
+		return;
+
 	PMAP_ASSERT_STAGE1(pmap);
 
 	dsb(ishst);
@@ -1220,6 +1228,9 @@
 {
 	uint64_t r;
 
+	if (pmap->pm_dead == TRUE)
+		return;
+
 	PMAP_ASSERT_STAGE1(pmap);
 
 	dsb(ishst);
@@ -1736,6 +1747,7 @@
 
 	PMAP_LOCK_INIT(pmap);
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+	pmap->pm_dead = FALSE;
 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
 	pmap->pm_root.rt_root = 0;
@@ -1753,6 +1765,8 @@
 {
 	vm_page_t m;
 
+	pmap->pm_dead = FALSE;
+
 	/*
 	 * allocate the l0 page
 	 */
@@ -1810,6 +1824,18 @@
 	return (pmap_pinit_stage(pmap, PM_STAGE1, 4));
 }
 
+/*
+ * This can be called before destroying a pmap so that TLB management
+ * can be skipped until the ASID is reused.
+ */
+void
+pmap_pre_destroy(pmap_t pmap)
+{
+	KASSERT(pmap != kernel_pmap,
+	    ("pmap_pre_destroy: Can't mark the kernel pmap as dead"));
+	pmap->pm_dead = TRUE;
+}
+
 /*
  * This routine is called if the desired page table page does not exist.
  *
@@ -3593,6 +3619,7 @@
 	vm_page_t mp;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PMAP_ASSERT_ALIVE(pmap);
 	KASSERT(psind > 0 && psind < MAXPAGESIZES,
 	    ("psind %d unexpected", psind));
 	KASSERT(((newpte & ~ATTR_MASK) & (pagesizes[psind] - 1)) == 0,
@@ -3701,6 +3728,8 @@
 	boolean_t nosleep;
 	int lvl, rv;
 
+	PMAP_ASSERT_ALIVE(pmap);
+
 	va = trunc_page(va);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4023,6 +4052,7 @@
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	PMAP_ASSERT_ALIVE(pmap);
 
 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
@@ -4083,6 +4113,7 @@
 	vm_page_t l2pg, mt;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PMAP_ASSERT_ALIVE(pmap);
 
 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
@@ -4194,6 +4225,7 @@
 	vm_pindex_t diff, psize;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
+	PMAP_ASSERT_ALIVE(pmap);
 
 	psize = atop(end - start);
 	mpte = NULL;
@@ -4231,6 +4263,7 @@
 	struct rwlock *lock;
 
 	lock = NULL;
+	PMAP_ASSERT_ALIVE(pmap);
 	PMAP_LOCK(pmap);
 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
 	if (lock != NULL)
@@ -4252,6 +4285,7 @@
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	PMAP_ASSERT_ALIVE(pmap);
 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
 
 /*
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -94,6 +94,7 @@
 	struct asid_set	*pm_asid_set;	/* The ASID/VMID set to use */
 	enum pmap_stage	pm_stage;
 	int		pm_levels;
+	int		pm_dead;
 };
 typedef struct pmap *pmap_t;
 
@@ -174,6 +175,7 @@
 void	*pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t ma);
 bool	pmap_page_is_mapped(vm_page_t m);
 int	pmap_pinit_stage(pmap_t, enum pmap_stage, int);
+void	pmap_pre_destroy(pmap_t pmap);
 bool	pmap_ps_enabled(pmap_t pmap);
 uint64_t pmap_to_ttbr0(pmap_t pmap);
 
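
For context, here is a minimal sketch of how a caller might use the new hook
during address-space teardown. The diff itself adds no call site, so the
function vmspace_teardown_sketch() and its exact ordering against
pmap_remove_pages()/pmap_release() are assumptions for illustration, not part
of the patch:

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

/*
 * Hypothetical teardown path (not in the patch): mark the pmap dead
 * before dismantling its final mappings.
 */
static void
vmspace_teardown_sketch(struct vmspace *vm)
{
	pmap_t pmap;

	pmap = vmspace_pmap(vm);

	/*
	 * After this call, the pm_dead early returns added above turn the
	 * patched invalidation routines into no-ops for this pmap; any
	 * stale TLB entries are flushed when its ASID is next reused.
	 */
	pmap_pre_destroy(pmap);

	/* The remaining unmap work proceeds without TLB shootdowns. */
	pmap_remove_pages(pmap);
	pmap_release(pmap);
}

Skipping invalidation is only safe because, as the patch's own comment notes,
the TLB is cleaned up when the ASID is reused; nothing may touch the address
space through this pmap in between. The PMAP_ASSERT_ALIVE() checks added to
the pmap_enter*() paths guard the inverse bug: an attempt to create a new
mapping in a pmap that has already been marked dead.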