Index: head/sys/arm64/arm64/locore.S
===================================================================
--- head/sys/arm64/arm64/locore.S
+++ head/sys/arm64/arm64/locore.S
@@ -34,6 +34,7 @@
 #include <machine/hypervisor.h>
 #include <machine/param.h>
 #include <machine/pte.h>
+#include <machine/vm.h>
 #include <machine/vmparam.h>
 
 #define	VIRT_BITS	48
@@ -42,10 +43,6 @@
 	.globl kernbase
 	.set kernbase, KERNBASE
 
-#define	DEVICE_MEM	0
-#define	NORMAL_UNCACHED	1
-#define	NORMAL_MEM	2
-
 /*
  * We assume:
  *  MMU on with an identity map, or off
@@ -396,7 +393,7 @@
 
 	/* Create the kernel space L2 table */
 	mov	x6, x26
-	mov	x7, #NORMAL_MEM
+	mov	x7, #VM_MEMATTR_WRITE_BACK
 	mov	x8, #(KERNBASE & L2_BLOCK_MASK)
 	mov	x9, x28
 	bl	build_l2_block_pagetable
@@ -433,15 +430,17 @@
 	mov	x6, x27		/* The initial page table */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
 	/* Create a table for the UART */
-	mov	x7, #(ATTR_nG | ATTR_IDX(DEVICE_MEM))
+	mov	x7, #(ATTR_nG | ATTR_IDX(VM_MEMATTR_DEVICE))
 	mov	x8, #(SOCDEV_VA)	/* VA start */
 	mov	x9, #(SOCDEV_PA)	/* PA start */
 	mov	x10, #1
 	bl	build_l1_block_pagetable
#endif
 
-	/* Create the VA = PA map */
-	mov	x7, #(ATTR_nG | ATTR_IDX(NORMAL_UNCACHED))
+	/*
+	 * Create the VA = PA map
+	 */
+	mov	x7, #(ATTR_nG | ATTR_IDX(VM_MEMATTR_UNCACHEABLE))
 	mov	x9, x27
 	mov	x8, x9		/* VA start (== PA start) */
 	mov	x10, #1
@@ -658,10 +657,10 @@
 	.align 3
 mair:
-	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) |	\
-		MAIR_ATTR(MAIR_NORMAL_NC, 1) |		\
-		MAIR_ATTR(MAIR_NORMAL_WB, 2) |		\
-		MAIR_ATTR(MAIR_NORMAL_WT, 3)
+	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE) |	\
+		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) |	\
+		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) |	\
+		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH)
tcr:
 	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K |	\
 	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -169,14 +169,6 @@
 #define	PMAP_INLINE
 #endif
 
-/*
- * These are configured by the mair_el1 register. This is set up in locore.S
- */
-#define	DEVICE_MEMORY	0
-#define	UNCACHED_MEMORY	1
-#define	CACHED_MEMORY	2
-
-
 #ifdef PV_STATS
 #define	PV_STAT(x)	do { x ; } while (0)
 #else
@@ -707,7 +699,7 @@
 		KASSERT(l2_slot != 0, ("..."));
 		pmap_store(&l2[l2_slot],
 		    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-		    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
+		    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 	}
 
 	KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS), ("..."));
@@ -719,7 +711,7 @@
 		l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
 		pmap_store(&pagetable_dmap[l1_slot],
 		    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-		    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
+		    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
 	}
 
 	/* Create L2 mappings at the end of the region */
@@ -744,7 +736,7 @@
 			l2_slot = pmap_l2_index(va);
 			pmap_store(&l2[l2_slot],
 			    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-			    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
+			    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 		}
 	}
 
@@ -1268,7 +1260,7 @@
 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
 {
 
-	pmap_kenter(sva, size, pa, DEVICE_MEMORY);
+	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
 }
 
 /*
@@ -3275,7 +3267,8 @@
 	    L3_PAGE);
 	if ((prot & VM_PROT_WRITE) == 0)
 		new_l3 |= ATTR_AP(ATTR_AP_RO);
-	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
+	if ((prot & VM_PROT_EXECUTE) == 0 ||
+	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l3 |= ATTR_XN;
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= ATTR_SW_WIRED;
@@ -3543,7 +3536,8 @@
 		new_l2 |= ATTR_SW_MANAGED;
 		new_l2 &= ~ATTR_AF;
 	}
-	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
+	if ((prot & VM_PROT_EXECUTE) == 0 ||
+	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l2 |= ATTR_XN;
 	if (va < VM_MAXUSER_ADDRESS)
 		new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
@@ -3845,7 +3839,8 @@
 	pa = VM_PAGE_TO_PHYS(m);
 	l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
 	    ATTR_AP(ATTR_AP_RO) | L3_PAGE;
-	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
+	if ((prot & VM_PROT_EXECUTE) == 0 ||
+	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		l3_val |= ATTR_XN;
 	if (va < VM_MAXUSER_ADDRESS)
 		l3_val |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
@@ -5223,7 +5218,7 @@
 			l2 = pmap_l1_to_l2(pde, va);
 			pmap_load_store(l2, pa | ATTR_DEFAULT | ATTR_XN |
-			    ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
+			    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 
 			va += L2_SIZE;
 			pa += L2_SIZE;
 
@@ -5247,7 +5242,7 @@
 		/* L3 table is linked */
 		va = trunc_page(va);
 		pa = trunc_page(pa);
-		pmap_kenter(va, size, pa, CACHED_MEMORY);
+		pmap_kenter(va, size, pa, VM_MEMATTR_WRITE_BACK);
 	}
 
 	return ((void *)(va + offset));
@@ -5433,7 +5428,7 @@
 			l3 = pmap_load(pte);
 			l3 &= ~ATTR_IDX_MASK;
 			l3 |= ATTR_IDX(mode);
-			if (mode == DEVICE_MEMORY)
+			if (mode == VM_MEMATTR_DEVICE)
 				l3 |= ATTR_XN;
 
 			pmap_update_entry(kernel_pmap, pte, l3, tmpva,
@@ -5509,7 +5504,8 @@
 
 	if (tmpl1 != 0) {
 		pmap_kenter(tmpl1, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
+		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
+		    VM_MEMATTR_WRITE_BACK);
 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
 	}
 
@@ -5651,7 +5647,8 @@
 	 */
 	if (tmpl2 != 0) {
 		pmap_kenter(tmpl2, PAGE_SIZE,
-		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
+		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
+		    VM_MEMATTR_WRITE_BACK);
 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
 	}
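
Context for the change: locore.S programs MAIR_EL1 (one 8-bit attribute per index) while pmap.c writes the matching index into each PTE's AttrIndx field via ATTR_IDX(), so the two files must agree on which MAIR byte holds which memory type. The diff replaces two independently maintained sets of magic numbers with the shared VM_MEMATTR_* constants so they cannot drift apart. The standalone C sketch below illustrates the encoding; the constant values are assumptions copied from the FreeBSD arm64 <machine/vm.h>, <machine/armreg.h>, and <machine/pte.h> of this era, for illustration only, not a substitute for the real headers.

/*
 * Standalone illustration, not kernel code: how the shared VM_MEMATTR_*
 * indices tie a PTE's AttrIndx field to a byte of MAIR_EL1.
 */
#include <stdint.h>
#include <stdio.h>

/* From <machine/vm.h> (assumed): which MAIR_EL1 byte each type occupies. */
#define	VM_MEMATTR_DEVICE	0
#define	VM_MEMATTR_UNCACHEABLE	1
#define	VM_MEMATTR_WRITE_BACK	2
#define	VM_MEMATTR_WRITE_THROUGH 3

/* From <machine/armreg.h> (assumed): place an 8-bit attribute at byte idx. */
#define	MAIR_ATTR(attr, idx)	((uint64_t)(attr) << ((idx) * 8))
#define	MAIR_DEVICE_nGnRnE	0x00
#define	MAIR_NORMAL_NC		0x44
#define	MAIR_NORMAL_WB		0xff
#define	MAIR_NORMAL_WT		0xbb

/* From <machine/pte.h> (assumed): AttrIndx is bits [4:2] of a block/page PTE. */
#define	ATTR_IDX(x)		((x) << 2)

int
main(void)
{
	/* The same value the 'mair:' .quad in locore.S now assembles. */
	uint64_t mair =
	    MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE) |
	    MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) |
	    MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) |
	    MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH);

	printf("MAIR_EL1 = %#018jx\n", (uintmax_t)mair);

	/*
	 * A write-back kernel mapping selects MAIR byte 2 through its
	 * AttrIndx field, matching ATTR_IDX(VM_MEMATTR_WRITE_BACK) in
	 * the pmap.c hunks above.
	 */
	printf("AttrIndx(write-back) = %#lx\n",
	    (unsigned long)ATTR_IDX(VM_MEMATTR_WRITE_BACK));
	return (0);
}

Under these assumed constants the sketch prints MAIR_EL1 = 0x00000000bbff4400: device memory in byte 0, normal-uncached in byte 1, write-back in byte 2, and write-through in byte 3, the same layout the mair: table in locore.S builds.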