diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -214,7 +214,7 @@
 			    p->md_phys, mode, p->md_pages);
 		}
 
-		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+		l3_attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_IDX(mode) |
 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
 		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
 			l3_attr |= ATTR_S1_XN;
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -747,7 +747,7 @@
 
 	/* Build the L2 block entry */
 	orr	x12, x7, #L2_BLOCK
-	orr	x12, x12, #(ATTR_DEFAULT)
+	orr	x12, x12, #(ATTR_AF | ATTR_SH(ATTR_SH_IS))
 	orr	x12, x12, #(ATTR_S1_UXN)
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)
@@ -823,7 +823,7 @@
 
 	/* Build the L3 page entry */
 	orr	x12, x7, #L3_PAGE
-	orr	x12, x12, #(ATTR_DEFAULT)
+	orr	x12, x12, #(ATTR_AF | ATTR_SH(ATTR_SH_IS))
 	orr	x12, x12, #(ATTR_S1_UXN)
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)
diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c
--- a/sys/arm64/arm64/minidump_machdep.c
+++ b/sys/arm64/arm64/minidump_machdep.c
@@ -310,8 +310,8 @@
 			for (i = 0; i < Ln_ENTRIES; i++) {
 				for (j = 0; j < Ln_ENTRIES; j++) {
 					tmpbuffer[j] = (pa + i * L2_SIZE +
-					    j * PAGE_SIZE) | ATTR_DEFAULT |
-					    L3_PAGE;
+					    j * PAGE_SIZE) | ATTR_AF |
+					    ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 				}
 				error = blk_write(di, (char *)&tmpbuffer, 0,
 				    PAGE_SIZE);
@@ -330,7 +330,7 @@
 			/* Generate fake l3 entries based upon the l1 entry */
 			for (i = 0; i < Ln_ENTRIES; i++) {
 				tmpbuffer[i] = (pa + i * PAGE_SIZE) |
-				    ATTR_DEFAULT | L3_PAGE;
+				    ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 			}
 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
 			if (error)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -185,8 +185,8 @@
 #else
 #define	ATTR_KERN_GP		0
 #endif
-#define	PMAP_SAN_PTE_BITS	(ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
-	ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+#define	PMAP_SAN_PTE_BITS	(ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | \
+	ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
 
 struct pmap_large_md_page {
 	struct rwlock	pv_lock;
@@ -1150,7 +1150,7 @@
 		MPASS((state->pa & L2_OFFSET) == 0);
 		MPASS(state->l2[l2_slot] == 0);
 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1200,7 +1200,7 @@
 		MPASS((state->pa & L3_OFFSET) == 0);
 		MPASS(state->l3[l3_slot] == 0);
 		pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1242,7 +1242,8 @@
 			MPASS((bs_state.pa & L1_OFFSET) == 0);
 			pmap_store(
 			    &bs_state.l1[pmap_l1_index(bs_state.va)],
-			    PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
+			    PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
+			    ATTR_SH(ATTR_SH_IS) |
 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
 			    ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
 		}
@@ -2111,8 +2112,8 @@
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("pmap_kenter: Mapping is not page-sized"));
 
-	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
-	    ATTR_KERN_GP | ATTR_S1_IDX(mode);
+	attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_AP(ATTR_S1_AP_RW) |
+	    ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
 	old_l3e = 0;
 	va = sva;
 	while (size != 0) {
@@ -2326,7 +2327,8 @@
 		    ("pmap_qenter: Invalid level %d", lvl));
 
 		m = ma[i];
-		attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+		attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
 		    ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
 		pte = pmap_l2_to_l3(pde, va);
 		old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
@@ -5122,7 +5124,8 @@
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
 	pa = VM_PAGE_TO_PHYS(m);
-	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
+	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+	    L3_PAGE);
 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
 	new_l3 |= pmap_pte_prot(pmap, prot);
 	if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -5465,13 +5468,13 @@
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    L2_BLOCK);
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		new_l2 |= ATTR_SW_MANAGED;
-		new_l2 &= ~ATTR_AF;
-	}
+	else
+		new_l2 |= ATTR_AF;
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l2 |= ATTR_S1_XN;
@@ -5694,13 +5697,13 @@
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	l3e = VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	l3e = VM_PAGE_TO_PTE(m) | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    ATTR_CONTIGUOUS | L3_PAGE;
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		l3e |= ATTR_SW_MANAGED;
-		l3e &= ~ATTR_AF;
-	}
+	else
+		l3e |= ATTR_AF;
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		l3e |= ATTR_S1_XN;
@@ -6091,8 +6094,8 @@
 	pmap_resident_count_inc(pmap, 1);
 
 	pa = VM_PAGE_TO_PHYS(m);
-	l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
-	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+	l3_val = PHYS_TO_PTE(pa) | ATTR_SH(ATTR_SH_IS) |
+	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
 	l3_val |= pmap_pte_bti(pmap, va);
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -6107,10 +6110,10 @@
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		l3_val |= ATTR_SW_MANAGED;
-		l3_val &= ~ATTR_AF;
-	}
+	else
+		l3_val |= ATTR_AF;
 
 	/* Sync icache before the mapping is stored to PTE */
 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
@@ -7741,9 +7744,9 @@
 			/* Insert L2_BLOCK */
 			l2 = pmap_l1_to_l2(pde, va);
 			old_l2e |= pmap_load_store(l2,
-			    PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
-			    ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
-			    L2_BLOCK);
+			    PHYS_TO_PTE(pa) | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+			    ATTR_S1_XN | ATTR_KERN_GP |
+			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 
 			va += L2_SIZE;
 			pa += L2_SIZE;
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -111,8 +111,6 @@
 #define	 ATTR_S2_MEMATTR_WT	0xa
 #define	 ATTR_S2_MEMATTR_WB	0xf
 
-#define	ATTR_DEFAULT	(ATTR_AF | ATTR_SH(ATTR_SH_IS))
-
 #define	ATTR_DESCR_MASK		3
 #define	ATTR_DESCR_VALID	1
 #define	ATTR_DESCR_TYPE_MASK	2
diff --git a/sys/arm64/iommu/iommu_pmap.c b/sys/arm64/iommu/iommu_pmap.c
--- a/sys/arm64/iommu/iommu_pmap.c
+++ b/sys/arm64/iommu/iommu_pmap.c
@@ -708,7 +708,7 @@
 	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
 
 	va = trunc_page(va);
-	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
+	new_l3 = (pt_entry_t)(pa | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
 	if ((prot & VM_PROT_WRITE) == 0)
 		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
diff --git a/sys/arm64/vmm/vmm_mmu.c b/sys/arm64/vmm/vmm_mmu.c
--- a/sys/arm64/vmm/vmm_mmu.c
+++ b/sys/arm64/vmm/vmm_mmu.c
@@ -294,7 +294,7 @@
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("%s: Mapping is not page-sized", __func__));
 
-	l3e = ATTR_DEFAULT | L3_PAGE;
+	l3e = ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 	/* This bit is res1 at EL2 */
 	l3e |= ATTR_S1_AP(ATTR_S1_AP_USER);
 	/* Only normal memory is used at EL2 */
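
Reviewer note (not part of the patch): most hunks simply spell ATTR_DEFAULT out as ATTR_AF | ATTR_SH(ATTR_SH_IS). The pmap_enter_2mpage()/pmap_enter_l3c()/pmap_enter_quick_locked() hunks additionally restructure the access-flag handling: instead of setting ATTR_AF up front and clearing it again for managed pages, the new code sets only the shareability bits and ORs ATTR_AF in for unmanaged mappings. The standalone C sketch below checks that the two orderings yield identical PTE bits; ATTR_AF and ATTR_SH use the architectural bit positions, while ATTR_SW_MANAGED and the output-address value are placeholders rather than the real pte.h definitions.

/*
 * Standalone sanity check (illustrative values only; the real macros live
 * in sys/arm64/include/pte.h).  Compares the old "set ATTR_DEFAULT, then
 * clear ATTR_AF for managed pages" logic against the new "set ATTR_AF only
 * for unmanaged pages" logic.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	ATTR_AF			(1UL << 10)		/* architectural AF bit */
#define	ATTR_SH(x)		((uint64_t)(x) << 8)	/* architectural SH field */
#define	ATTR_SH_IS		3
#define	ATTR_DEFAULT		(ATTR_AF | ATTR_SH(ATTR_SH_IS))
#define	ATTR_SW_MANAGED		(1UL << 56)		/* placeholder software bit */
#define	L3_PAGE			3

static uint64_t
old_l3e(uint64_t pte, bool managed)
{
	uint64_t l3e;

	l3e = pte | ATTR_DEFAULT | L3_PAGE;
	if (managed) {
		l3e |= ATTR_SW_MANAGED;
		l3e &= ~ATTR_AF;
	}
	return (l3e);
}

static uint64_t
new_l3e(uint64_t pte, bool managed)
{
	uint64_t l3e;

	l3e = pte | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
	if (managed)
		l3e |= ATTR_SW_MANAGED;
	else
		l3e |= ATTR_AF;
	return (l3e);
}

int
main(void)
{
	uint64_t pte = 0x40000000UL;	/* arbitrary output-address bits */

	assert(old_l3e(pte, true) == new_l3e(pte, true));
	assert(old_l3e(pte, false) == new_l3e(pte, false));
	printf("old and new AF handling produce identical PTEs\n");
	return (0);
}

Built as a normal userland program (e.g. cc -o af_check af_check.c, a hypothetical file name), both asserts pass, which is the quick argument that the rewritten managed/unmanaged branches are behavior-preserving.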