diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -214,7 +214,7 @@
 			    p->md_phys, mode, p->md_pages);
 		}
 
-		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+		l3_attr = pmap_sh_attr | ATTR_DEFAULT | ATTR_S1_IDX(mode) |
 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
 		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
 			l3_attr |= ATTR_S1_XN;
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -86,6 +86,7 @@
 	 * x27 = TTBR0 table
 	 * x26 = Kernel L1 table
 	 * x24 = TTBR1 table
+	 * x22 = PTE shareability attributes
 	 */
 
 	/* Enable the mmu */
@@ -136,6 +137,10 @@
 	str	x23, [x0, #BP_BOOT_EL]
 	str	x4, [x0, #BP_HCR_EL2]
 
+	/* Set this before it's used in kasan_init_early */
+	adrp	x1, pmap_sh_attr
+	str	x22, [x1, :lo12:pmap_sh_attr]
+
 #ifdef KASAN
 	/* Save bootparams */
 	mov	x19, x0
@@ -462,6 +467,30 @@
 	cmp	x6, x27
 	b.lo	1b
 
+	/*
+	 * Find the shareability attribute we should use. If FEAT_LPA2 is
+	 * enabled then the shareability field is moved from the page table
+	 * to tcr_el1 and the bits in the page table are reused by the
+	 * output address field.
+	 */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define	LPA2_MASK	ID_AA64MMFR0_TGran4_MASK
+#define	LPA2_VAL	ID_AA64MMFR0_TGran4_LPA2
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define	LPA2_MASK	ID_AA64MMFR0_TGran16_MASK
+#define	LPA2_VAL	ID_AA64MMFR0_TGran16_LPA2
+#else
+#error Unsupported page size
+#endif
+	mrs	x6, id_aa64mmfr0_el1
+	mov	x7, LPA2_VAL
+	and	x6, x6, LPA2_MASK
+	cmp	x6, x7
+	ldr	x22, =(ATTR_SH(ATTR_SH_IS))
+	csel	x22, xzr, x22, eq
+#undef LPA2_MASK
+#undef LPA2_VAL
+
 	/*
 	 * Build the TTBR1 maps.
 	 */
@@ -738,6 +767,8 @@
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)
 #endif
+	/* Set the shareability attribute */
+	orr	x12, x12, x22
 
 	/* Only use the output address bits */
 	lsr	x9, x9, #L2_SHIFT
@@ -814,6 +845,8 @@
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)
 #endif
+	/* Set the shareability attribute */
+	orr	x12, x12, x22
 
 	/* Only use the output address bits */
 	lsr	x9, x9, #L3_SHIFT
@@ -872,6 +905,13 @@
 	 * to 1 only if the ASIDBits field equals 0b0010.
 	 */
	ldr	x2, tcr
+
+	/* If x22 is non-zero then LPA2 is not implemented, so leave TCR_DS clear */
+	cbnz	x22, .Lno_lpa2
+	ldr	x3, =(TCR_DS)
+	orr	x2, x2, x3
+.Lno_lpa2:
+
 	mrs	x3, id_aa64mmfr0_el1
 
 	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
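The probe added to locore.S is the crux of the change: on LPA2-capable hardware the descriptor's shareability bits are repurposed as output address bits, so the kernel must stop setting them per entry and set TCR_EL1.DS instead, with shareability then coming from TCR_EL1.{SH0,SH1}. A rough C rendering of the value the assembly leaves in x22 may help; this is an illustrative sketch, not code from the patch (probe_sh_attr is a hypothetical name, and it assumes the 4K-granule path using the same field names the diff references):

	/*
	 * Sketch of the locore.S probe. If the ID register reports LPA2
	 * support for this granule size, each PTE must carry 0 in the
	 * shareability field; otherwise use Inner Shareable as before.
	 */
	static uint64_t
	probe_sh_attr(void)
	{
		uint64_t mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);

		if ((mmfr0 & ID_AA64MMFR0_TGran4_MASK) ==
		    ID_AA64MMFR0_TGran4_LPA2)
			return (0);			/* csel x22, xzr, x22, eq */
		return (ATTR_SH(ATTR_SH_IS));		/* ldr x22, =(...) */
	}
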
diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c
--- a/sys/arm64/arm64/minidump_machdep.c
+++ b/sys/arm64/arm64/minidump_machdep.c
@@ -310,8 +310,8 @@
 		for (i = 0; i < Ln_ENTRIES; i++) {
 			for (j = 0; j < Ln_ENTRIES; j++) {
 				tmpbuffer[j] = (pa + i * L2_SIZE +
-				    j * PAGE_SIZE) | ATTR_DEFAULT |
-				    L3_PAGE;
+				    j * PAGE_SIZE) | pmap_sh_attr |
+				    ATTR_DEFAULT | L3_PAGE;
 			}
 			error = blk_write(di, (char *)&tmpbuffer, 0,
 			    PAGE_SIZE);
@@ -330,7 +330,7 @@
 		/* Generate fake l3 entries based upon the l1 entry */
 		for (i = 0; i < Ln_ENTRIES; i++) {
 			tmpbuffer[i] = (pa + i * PAGE_SIZE) |
-			    ATTR_DEFAULT | L3_PAGE;
+			    pmap_sh_attr | ATTR_DEFAULT | L3_PAGE;
 		}
 		error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
 		if (error)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -355,6 +355,8 @@
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM/pmap parameters");
 
+pt_entry_t pmap_sh_attr = ATTR_SH(ATTR_SH_IS);
+
 #if PAGE_SIZE == PAGE_SIZE_4K
 #define	L1_BLOCKS_SUPPORTED	1
 #else
@@ -1150,7 +1152,7 @@
 		MPASS((state->pa & L2_OFFSET) == 0);
 		MPASS(state->l2[l2_slot] == 0);
 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    pmap_sh_attr | ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1200,7 +1202,7 @@
 		MPASS((state->pa & L3_OFFSET) == 0);
 		MPASS(state->l3[l3_slot] == 0);
 		pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    pmap_sh_attr | ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1242,7 +1244,8 @@
 			MPASS((bs_state.pa & L1_OFFSET) == 0);
 			pmap_store(
 			    &bs_state.l1[pmap_l1_index(bs_state.va)],
-			    PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
+			    PHYS_TO_PTE(bs_state.pa) |
+			    pmap_sh_attr | ATTR_DEFAULT |
 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
 			    ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
 		}
@@ -1400,7 +1403,8 @@
 		bzero((void *)PHYS_TO_DMAP(pa), L2_SIZE);
 		physmem_exclude_region(pa, L2_SIZE, EXFLAG_NOALLOC);
 
-		pmap_store(l2, PHYS_TO_PTE(pa) | PMAP_SAN_PTE_BITS | L2_BLOCK);
+		pmap_store(l2, PHYS_TO_PTE(pa) |
+		    pmap_sh_attr | PMAP_SAN_PTE_BITS | L2_BLOCK);
 	}
 	*vap = va;
 }
@@ -2111,8 +2115,8 @@
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("pmap_kenter: Mapping is not page-sized"));
 
-	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
-	    ATTR_KERN_GP | ATTR_S1_IDX(mode);
+	attr = pmap_sh_attr | ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) |
+	    ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
 	old_l3e = 0;
 	va = sva;
 	while (size != 0) {
@@ -2326,7 +2330,8 @@
 		    ("pmap_qenter: Invalid level %d", lvl));
 
 		m = ma[i];
-		attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+		attr = pmap_sh_attr | ATTR_DEFAULT |
+		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
 		    ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
 		pte = pmap_l2_to_l3(pde, va);
 		old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
@@ -5122,7 +5127,8 @@
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
 	pa = VM_PAGE_TO_PHYS(m);
-	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
+	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | pmap_sh_attr | ATTR_DEFAULT |
+	    L3_PAGE);
 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
 	new_l3 |= pmap_pte_prot(pmap, prot);
 	if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -5465,7 +5471,7 @@
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | pmap_sh_attr | ATTR_DEFAULT |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    L2_BLOCK);
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
@@ -5694,7 +5700,7 @@
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	l3e = VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	l3e = VM_PAGE_TO_PTE(m) | pmap_sh_attr | ATTR_DEFAULT |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    ATTR_CONTIGUOUS | L3_PAGE;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
@@ -6091,8 +6097,8 @@
 	pmap_resident_count_inc(pmap, 1);
 
 	pa = VM_PAGE_TO_PHYS(m);
-	l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
-	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+	l3_val = PHYS_TO_PTE(pa) | pmap_sh_attr | ATTR_DEFAULT |
+	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
 	l3_val |= pmap_pte_bti(pmap, va);
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -7741,7 +7747,8 @@
 		/* Insert L2_BLOCK */
 		l2 = pmap_l1_to_l2(pde, va);
 		old_l2e |= pmap_load_store(l2,
-		    PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
+		    PHYS_TO_PTE(pa) | pmap_sh_attr |
+		    ATTR_DEFAULT | ATTR_S1_XN |
 		    ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
 		    L2_BLOCK);
@@ -9576,7 +9583,7 @@
 		block = pmap_san_enter_bootstrap_alloc_l2();
 		pmap_store(&l2[slot],
 		    PHYS_TO_PTE(pmap_early_vtophys(block)) |
-		    PMAP_SAN_PTE_BITS | L2_BLOCK);
+		    pmap_sh_attr | PMAP_SAN_PTE_BITS | L2_BLOCK);
 		dmb(ishst);
 	}
@@ -9594,7 +9601,7 @@
 	if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
 		m = pmap_san_enter_alloc_l2();
 		if (m != NULL) {
-			pmap_store(l2, VM_PAGE_TO_PTE(m) |
+			pmap_store(l2, VM_PAGE_TO_PTE(m) | pmap_sh_attr |
 			    PMAP_SAN_PTE_BITS | L2_BLOCK);
 		} else {
 			m = pmap_san_enter_alloc_l3();
@@ -9608,7 +9615,8 @@
 	if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
 		return;
 	m = pmap_san_enter_alloc_l3();
-	pmap_store(l3, VM_PAGE_TO_PTE(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
+	pmap_store(l3, VM_PAGE_TO_PTE(m) | pmap_sh_attr | PMAP_SAN_PTE_BITS |
+	    L3_PAGE);
 	dmb(ishst);
 }
 #endif /* KASAN || KMSAN */
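Every pmap.c site above follows the same pattern: wherever a leaf or block descriptor was built from ATTR_DEFAULT, the runtime-selected pmap_sh_attr is now OR'd in alongside it. Condensed into one hypothetical helper (make_kernel_l3 is an illustrative name, not part of the patch), the new shape of these call sites looks like this:

	/*
	 * How a kernel RW, non-executable L3 entry is composed after this
	 * change: the shareability bits come from pmap_sh_attr (0 under
	 * LPA2, ATTR_SH(ATTR_SH_IS) otherwise) instead of ATTR_DEFAULT.
	 */
	static pt_entry_t
	make_kernel_l3(vm_paddr_t pa, vm_memattr_t mode)
	{
		return (PHYS_TO_PTE(pa) | pmap_sh_attr | ATTR_DEFAULT |
		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
		    ATTR_S1_IDX(mode) | L3_PAGE);
	}
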
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -127,6 +127,8 @@
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
+extern pt_entry_t pmap_sh_attr;
+
 /*
  * Macros to test if a mapping is mappable with an L1 Section mapping
  * or an L2 Large Page mapping.
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -111,7 +111,7 @@
 #define	 ATTR_S2_MEMATTR_WT	0xa
 #define	 ATTR_S2_MEMATTR_WB	0xf
 
-#define	ATTR_DEFAULT	(ATTR_AF | ATTR_SH(ATTR_SH_IS))
+#define	ATTR_DEFAULT	(ATTR_AF)
 
 #define	ATTR_DESCR_MASK	3
 #define	ATTR_DESCR_VALID	1
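One consequence of the pte.h hunk is worth calling out: ATTR_DEFAULT no longer implies Inner Shareable. Any descriptor built from ATTR_DEFAULT alone now carries SH == 0, which on non-LPA2 CPUs means Non-shareable Normal memory, so out-of-tree or downstream code composing PTEs this way needs the same treatment the patch applies in-tree. A minimal before/after sketch, with pa and mode standing in for a caller's values:

	/* Before: Inner Shareable was baked into ATTR_DEFAULT. */
	pte = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(mode) | L3_PAGE;

	/* After: the boot-time probed attribute must be OR'd in explicitly. */
	pte = PHYS_TO_PTE(pa) | pmap_sh_attr | ATTR_DEFAULT |
	    ATTR_S1_IDX(mode) | L3_PAGE;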