Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -508,7 +508,8 @@
 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 
 static uma_zone_t pmap_bti_ranges_zone;
-static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
+static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+    pt_entry_t *pte);
 static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);
 static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
 static void *bti_dup_range(void *ctx, void *data);
@@ -4955,21 +4956,22 @@
 #endif /* VM_NRESERVLEVEL > 0 */
 
 static int
-pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
+pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t pte, int flags,
     int psind)
 {
-	pd_entry_t *l0p, *l1p, *l2p, origpte;
+	pd_entry_t *l0p, *l1p, *l2p, newpte, origpte;
 	vm_page_t mp;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(psind > 0 && psind < MAXPAGESIZES,
 	    ("psind %d unexpected", psind));
-	KASSERT((PTE_TO_PHYS(newpte) & (pagesizes[psind] - 1)) == 0,
-	    ("unaligned phys address %#lx newpte %#lx psind %d",
-	    PTE_TO_PHYS(newpte), newpte, psind));
+	KASSERT((PTE_TO_PHYS(pte) & (pagesizes[psind] - 1)) == 0,
+	    ("unaligned phys address %#lx pte %#lx psind %d",
+	    PTE_TO_PHYS(pte), pte, psind));
 
 restart:
-	if (!pmap_bti_same(pmap, va, va + pagesizes[psind]))
+	newpte = pte;
+	if (!pmap_bti_same(pmap, va, va + pagesizes[psind], &newpte))
 		return (KERN_PROTECTION_FAILURE);
 	if (psind == 2) {
 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
@@ -5123,9 +5125,6 @@
 	lock = NULL;
 
 	PMAP_LOCK(pmap);
-	/* Wait until we lock the pmap to protect the bti rangeset */
-	new_l3 |= pmap_pte_bti(pmap, va);
-
 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("managed largepage va %#lx flags %#x", va, flags));
@@ -5197,6 +5196,7 @@
 	orig_l3 = pmap_load(l3);
 	opa = PTE_TO_PHYS(orig_l3);
 	pv = NULL;
+	new_l3 |= pmap_pte_bti(pmap, va);
 
 	/*
 	 * Is the specified virtual address already mapped?
@@ -5478,7 +5478,7 @@
 	 * and let vm_fault() cope.  Check after l2 allocation, since
 	 * it could sleep.
	 */
-	if (!pmap_bti_same(pmap, va, va + L2_SIZE)) {
+	if (!pmap_bti_same(pmap, va, va + L2_SIZE, &new_l2)) {
 		KASSERT(l2pg != NULL, ("pmap_enter_l2: missing L2 PTP"));
 		pmap_abort_ptp(pmap, va, l2pg);
 		return (KERN_PROTECTION_FAILURE);
@@ -5733,19 +5733,6 @@
 			}
 		}
 		l3p = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ml3p));
-
-have_l3p:
-		/*
-		 * If bti is not the same for the whole L3C range, return
-		 * failure and let vm_fault() cope.  Check after L3 allocation,
-		 * since it could sleep.
-		 */
-		if (!pmap_bti_same(pmap, va, va + L3C_SIZE)) {
-			(*ml3p)->ref_count -= L3C_ENTRIES - 1;
-			pmap_abort_ptp(pmap, va, *ml3p);
-			*ml3p = NULL;
-			return (KERN_PROTECTION_FAILURE);
-		}
 	} else {
 		*ml3p = NULL;
 
@@ -5768,8 +5755,21 @@
 				    pmap_load(pde)));
 		}
 	}
+have_l3p:
 	l3p = &l3p[pmap_l3_index(va)];
 
+	/*
+	 * If bti is not the same for the whole L3C range, return failure
+	 * and let vm_fault() cope.  Check after L3 allocation, since
+	 * it could sleep.
+	 */
+	if (!pmap_bti_same(pmap, va, va + L3C_SIZE, &l3e)) {
+		(*ml3p)->ref_count -= L3C_ENTRIES - 1;
+		pmap_abort_ptp(pmap, va, *ml3p);
+		*ml3p = NULL;
+		return (KERN_PROTECTION_FAILURE);
+	}
+
 	/*
 	 * If there are existing mappings, either abort or remove them.
 	 */
@@ -9271,8 +9271,16 @@
 	rangeset_remove_all(pmap->pm_bti);
 }
 
+/*
+ * Returns true if the BTI setting is the same across the specified address
+ * range, and false otherwise.  When returning true, updates the referenced PTE
+ * to reflect the BTI setting.
+ *
+ * Only stage 1 pmaps support BTI.  The kernel pmap is always a stage 1 pmap
+ * that has the same BTI setting implicitly across its entire address range.
+ */
 static bool
-pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
 {
 	struct rs_el *prev_rs, *rs;
 	vm_offset_t va;
@@ -9283,9 +9291,14 @@
 	KASSERT(ADDR_IS_CANONICAL(eva),
 	    ("%s: End address not in canonical form: %lx", __func__, eva));
 
-	if (pmap->pm_bti == NULL || ADDR_IS_KERNEL(sva))
+	if (pmap == kernel_pmap) {
+		*pte |= ATTR_KERN_GP;
+		return (true);
+	}
+	if (pmap->pm_bti == NULL)
 		return (true);
-	MPASS(!ADDR_IS_KERNEL(eva));
+	PMAP_ASSERT_STAGE1(pmap);
+	prev_rs = NULL;
 	for (va = sva; va < eva; prev_rs = rs) {
 		rs = rangeset_lookup(pmap->pm_bti, va);
 		if (va == sva)
@@ -9298,6 +9311,8 @@
 		}
 		va = rs->re_end;
 	}
+	if (prev_rs != NULL)
+		*pte |= ATTR_S1_GP;
 	return (true);
 }
 
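Note (not part of the patch): after this change, pmap_bti_same() is no longer
a pure predicate.  On success it also ORs the appropriate guard-page bit into
the caller-supplied PTE (ATTR_KERN_GP for the kernel pmap, ATTR_S1_GP for a
user pmap whose BTI rangeset covers the whole range), which is why each caller
now passes a pointer to the PTE it is about to install.  A minimal sketch of
the revised calling convention follows; it assumes the surrounding pmap.c
context, and the helper name example_enter_l2() is hypothetical, not part of
the patch:

	/*
	 * Illustration only: validate and decorate a prospective L2
	 * block entry before installing it, mirroring what the patch
	 * does in pmap_enter_l2().
	 */
	static int
	example_enter_l2(pmap_t pmap, vm_offset_t va, pt_entry_t new_l2)
	{
		/*
		 * On success, pmap_bti_same() may have set a GP bit in
		 * new_l2; on failure, create no mapping and let
		 * vm_fault() cope.
		 */
		if (!pmap_bti_same(pmap, va, va + L2_SIZE, &new_l2))
			return (KERN_PROTECTION_FAILURE);
		/* ... install new_l2 into the page table here ... */
		return (KERN_SUCCESS);
	}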