Index: sys/arm64/arm64/machdep.c =================================================================== --- sys/arm64/arm64/machdep.c +++ sys/arm64/arm64/machdep.c @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -84,6 +85,12 @@ #include #endif +#define ARM64_PA_BITS 48 +#define PHYSMEM_MAP_SHIFT (L1_SHIFT) +#define PHYSMEM_SETSIZE (1 << (ARM64_PA_BITS - PHYSMEM_MAP_SHIFT)) +BITSET_DEFINE(physmem_bits, PHYSMEM_SETSIZE); +static struct physmem_bits physmem_bits; +struct direct_map_desc arm64_dmap_desc[MAX_DMAP_ENTRIES]; struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; @@ -794,7 +801,11 @@ vm_offset_t lastaddr; caddr_t kmdp; vm_paddr_t mem_len; + vm_offset_t dmap_address; + int dmap_cnt; int i; + int on; + vm_paddr_t start, end; /* Set the module data location */ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep); @@ -820,16 +831,72 @@ MODINFO_METADATA | MODINFOMD_EFI_MAP); add_efi_map_entries(efihdr, physmap, &physmap_idx); - /* Print the memory map */ + /* Load the memory map and prepare DMAP descriptor */ mem_len = 0; for (i = 0; i < physmap_idx; i += 2) { dump_avail[i] = physmap[i]; dump_avail[i + 1] = physmap[i + 1]; mem_len += physmap[i + 1] - physmap[i]; + + /* + * DMAP can be mapped only by chunks of size specified + * by PHYSMEM_MAP_SHIFT. Set appropriate bits in bitmap + * to indicate this physical range should be mapped. 
+ */
+ start = physmap[i] >> PHYSMEM_MAP_SHIFT;
+ end = ((physmap[i+1] - 1) >> PHYSMEM_MAP_SHIFT) + 1;
+ while (start < end) {
+ BIT_SET(PHYSMEM_SETSIZE, start, &physmem_bits);
+ start++;
+ }
 }
 dump_avail[i] = 0;
 dump_avail[i + 1] = 0;
+ /* Convert physmap data from bitmap to chunks */
+ on = 0;
+ dmap_address = DMAP_MIN_ADDRESS;
+ dmap_cnt = 0;
+ for (i = 0; i < PHYSMEM_SETSIZE; i++) {
+ if (BIT_ISSET(PHYSMEM_SETSIZE, i, &physmem_bits)
+ && (on == 0)) {
+ on = 1;
+ start = i;
+ start = start << PHYSMEM_MAP_SHIFT;
+ }
+ if ((!BIT_ISSET(PHYSMEM_SETSIZE, i, &physmem_bits)
+ || (i == (PHYSMEM_SETSIZE - 1))) && (on == 1)) {
+ on = 0;
+ end = i + (BIT_ISSET(PHYSMEM_SETSIZE, i, &physmem_bits) != 0);
+ end = (end << PHYSMEM_MAP_SHIFT);
+
+ /* Create physical descriptor */
+
+ if (dmap_cnt >= MAX_DMAP_ENTRIES)
+ panic("Physmap is too fragmented to fit in DMAP");
+
+ arm64_dmap_desc[dmap_cnt].va_start = dmap_address;
+ arm64_dmap_desc[dmap_cnt].va_end = dmap_address + end - start - 1;
+ arm64_dmap_desc[dmap_cnt].pa_start = start;
+ arm64_dmap_desc[dmap_cnt].pa_end = end - 1;
+ arm64_dmap_desc[dmap_cnt].flags = DMAP_FLAG_VALID;
+
+ if (dmap_address + end - start - 1 > DMAP_MAX_ADDRESS)
+ panic("Physmap is too large to fit DMAP");
+
+ if (bootverbose)
+ printf("PHYSMAP RANGE: "
+ "VA: %lx-%lx PA: %lx-%lx\n",
+ arm64_dmap_desc[dmap_cnt].va_start,
+ arm64_dmap_desc[dmap_cnt].va_end,
+ arm64_dmap_desc[dmap_cnt].pa_start,
+ arm64_dmap_desc[dmap_cnt].pa_end);
+
+ dmap_address += end - start;
+ dmap_cnt++;
+ }
+ }
+
 /* Set the pcpu data, this is needed by pmap_bootstrap */
 pcpup = &__pcpu[0];
 pcpu_init(pcpup, 0, sizeof(struct pcpu)); Index: sys/arm64/arm64/minidump_machdep.c =================================================================== --- sys/arm64/arm64/minidump_machdep.c +++ sys/arm64/arm64/minidump_machdep.c @@ -299,9 +299,8 @@ mdhdr.bitmapsize = vm_page_dump_size; mdhdr.pmapsize = pmapsize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; - mdhdr.dmapphys = DMAP_MIN_PHYSADDR; - mdhdr.dmapbase = DMAP_MIN_ADDRESS; - mdhdr.dmapend = DMAP_MAX_ADDRESS; + 
memcpy(&mdhdr.dmap_desc, &arm64_dmap_desc, + sizeof(struct direct_map_desc) * MAX_DMAP_ENTRIES); mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION, dumpsize, di->blocksize); Index: sys/arm64/arm64/pmap.c =================================================================== --- sys/arm64/arm64/pmap.c +++ sys/arm64/arm64/pmap.c @@ -452,14 +452,20 @@ vm_paddr_t pa; pd_entry_t *l1; u_int l1_slot; + struct direct_map_desc *desc; + int a; - va = DMAP_MIN_ADDRESS; + for (a = 0; a < MAX_DMAP_ENTRIES; a++) { + desc = &arm64_dmap_desc[a]; + if (desc->flags == DMAP_FLAG_VALID) { + va = desc->va_start; l1 = (pd_entry_t *)l1pt; - l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS); + l1_slot = pmap_l1_index(va); - for (pa = 0; va < DMAP_MAX_ADDRESS; + for (pa = desc->pa_start; va < desc->va_end; pa += L1_SIZE, va += L1_SIZE, l1_slot++) { - KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); + KASSERT(l1_slot < Ln_ENTRIES, + ("Invalid L1 index")); pmap_load_store(&l1[l1_slot], (pa & ~L1_OFFSET) | ATTR_DEFAULT | @@ -467,6 +473,10 @@ } cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); + } + + desc++; + } cpu_tlb_flushID(); } @@ -856,7 +866,7 @@ pt_entry_t *l3; vm_paddr_t pa; - if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { + if (VIRT_IN_DMAP(va)) { pa = DMAP_TO_PHYS(va); } else { l2 = pmap_l2(kernel_pmap, va); @@ -3120,7 +3130,7 @@ needs_mapping = FALSE; for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); - if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) { + if (__predict_false(PHYS_IN_DMAP(paddr) == 0)) { error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr[i]); KASSERT(error == 0, ("vmem_alloc failed: %d", error)); @@ -3138,7 +3148,7 @@ sched_pin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); - if (paddr >= DMAP_MAX_PHYSADDR) { + if (PHYS_IN_DMAP(paddr) == 0) { panic( "pmap_map_io_transient: TODO: Map out of DMAP data"); } @@ -3158,7 +3168,7 @@ sched_unpin(); for (i = 0; i < count; i++) { paddr = 
VM_PAGE_TO_PHYS(page[i]); - if (paddr >= DMAP_MAX_PHYSADDR) { + if (PHYS_IN_DMAP(paddr) == 0) { panic("ARM64TODO: pmap_unmap_io_transient: Unmap data"); } } Index: sys/arm64/include/minidump.h =================================================================== --- sys/arm64/include/minidump.h +++ sys/arm64/include/minidump.h @@ -33,6 +33,8 @@ #define MINIDUMP_MAGIC "minidump FreeBSD/arm64" #define MINIDUMP_VERSION 1 +#include + struct minidumphdr { char magic[24]; uint32_t version; @@ -40,9 +42,7 @@ uint32_t bitmapsize; uint32_t pmapsize; uint64_t kernbase; - uint64_t dmapphys; - uint64_t dmapbase; - uint64_t dmapend; + struct direct_map_desc dmap_desc[MAX_DMAP_ENTRIES]; }; #endif /* _MACHINE_MINIDUMP_H_ */ Index: sys/arm64/include/vmparam.h =================================================================== --- sys/arm64/include/vmparam.h +++ sys/arm64/include/vmparam.h @@ -149,6 +149,24 @@ * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the * user address space. 
*/ + +#include +#include +#include + +#define DMAP_FLAG_VALID (0x1) + +#define MAX_DMAP_ENTRIES 8 +struct direct_map_desc { + uint64_t pa_start; + uint64_t pa_end; + uint64_t va_start; + uint64_t va_end; + uint64_t flags; +}; + +extern struct direct_map_desc arm64_dmap_desc[]; + #define VM_MIN_ADDRESS (0x0000000000000000UL) #define VM_MAX_ADDRESS (0xffffffffffffffffUL) @@ -156,34 +174,89 @@ #define VM_MIN_KERNEL_ADDRESS (0xffffff8000000000UL) #define VM_MAX_KERNEL_ADDRESS (0xffffff8800000000UL) -/* Direct Map for 128 GiB of PA: 0x0 - 0x1fffffffff */ +/* VA Addresses of Direct Map for 128 GiB */ #define DMAP_MIN_ADDRESS (0xffffffc000000000UL) #define DMAP_MAX_ADDRESS (0xffffffdfffffffffUL) -#define DMAP_MIN_PHYSADDR (0x0000000000000000UL) -#define DMAP_MAX_PHYSADDR (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) - -/* True if pa is in the dmap range */ -#define PHYS_IN_DMAP(pa) ((pa) <= DMAP_MAX_PHYSADDR) -/* True if va is in the dmap range */ -#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \ - (va) <= DMAP_MAX_ADDRESS) - -#define PHYS_TO_DMAP(pa) \ -({ \ - KASSERT(PHYS_IN_DMAP(pa), \ - ("%s: PA out of range, PA: 0x%lx", __func__, \ - (vm_paddr_t)(pa))); \ - (pa) | DMAP_MIN_ADDRESS; \ -}) - -#define DMAP_TO_PHYS(va) \ -({ \ - KASSERT(VIRT_IN_DMAP(va), \ - ("%s: VA out of range, VA: 0x%lx", __func__, \ - (vm_offset_t)(va))); \ - (va) & ~DMAP_MIN_ADDRESS; \ -}) +static inline int +PHYS_IN_DMAP(uint64_t pa) +{ + struct direct_map_desc *desc; + int a; + + for (a = 0; a < MAX_DMAP_ENTRIES; a++) { + desc = &arm64_dmap_desc[a]; + if (desc->flags == DMAP_FLAG_VALID) { + if (pa >= desc->pa_start && pa <= desc->pa_end) + return (1); + } + } + + return (0); +} + +static inline int +VIRT_IN_DMAP(uint64_t va) +{ + struct direct_map_desc *desc; + int a; + + for (a = 0; a < MAX_DMAP_ENTRIES; a++) { + desc = &arm64_dmap_desc[a]; + if (desc->flags == DMAP_FLAG_VALID) { + if (va >= desc->va_start && va <= desc->va_end) + return (1); + } + } + + return (0); +} + +static inline uint64_t 
+PHYS_TO_DMAP(uint64_t pa) +{ + struct direct_map_desc *desc; + int a; + + KASSERT(PHYS_IN_DMAP(pa), ("%s: PA out of range, PA: 0x%lx", + __func__, (vm_paddr_t)(pa))); + + for (a = 0; a < MAX_DMAP_ENTRIES; a++) { + desc = &arm64_dmap_desc[a]; + if (desc->flags == DMAP_FLAG_VALID) { + if (pa >= desc->pa_start && pa <= desc->pa_end) { + uint64_t offset; + offset = pa - desc->pa_start; + return (desc->va_start + offset); + } + } + } + + return (0); +} + +static inline uint64_t +DMAP_TO_PHYS(uint64_t va) +{ + struct direct_map_desc *desc; + int a; + + KASSERT(VIRT_IN_DMAP(va), ("%s: VA out of range, VA: 0x%lx", + __func__, (vm_paddr_t)(va))); + + for (a = 0; a < MAX_DMAP_ENTRIES; a++) { + desc = &arm64_dmap_desc[a]; + if (desc->flags == DMAP_FLAG_VALID) { + if (va >= desc->va_start && va <= desc->va_end) { + uint64_t offset; + offset = va - desc->va_start; + return (desc->pa_start + offset); + } + } + } + + return (0); +} #define VM_MIN_USER_ADDRESS (0x0000000000000000UL) #define VM_MAX_USER_ADDRESS (0x0000008000000000UL)