Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -539,14 +539,12 @@
 pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 {
 	u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot;
-	uint64_t kern_delta;
 	pt_entry_t *l2;
 	vm_offset_t va, freemempos;
 	vm_offset_t dpcpu, msgbufpv;
-	vm_paddr_t pa, min_pa, max_pa;
+	vm_paddr_t end, max_pa, min_pa, pa, start;
 	int i;
 
-	kern_delta = KERNBASE - kernstart;
 	physmem = 0;
 
 	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
@@ -565,7 +563,7 @@
 	LIST_INIT(&allpmaps);
 
 	/* Assume the address we were loaded to is a valid physical address */
-	min_pa = max_pa = KERNBASE - kern_delta;
+	min_pa = max_pa = kernstart;
 
 	/*
 	 * Find the minimum physical address. physmap is sorted,
@@ -586,35 +584,24 @@
 	/* Create a direct map region early so we can use it for pa -> va */
 	pmap_bootstrap_dmap(l1pt, min_pa, max_pa);
 
-	va = KERNBASE;
-	pa = KERNBASE - kern_delta;
-
 	/*
 	 * Start to initialize phys_avail by copying from physmap
 	 * up to the physical address KERNBASE points at.
 	 */
 	map_slot = avail_slot = 0;
-	for (; map_slot < (physmap_idx * 2); map_slot += 2) {
+	for (; map_slot < physmap_idx * 2 && physmap[map_slot] < kernstart;
+	    map_slot += 2) {
 		if (physmap[map_slot] == physmap[map_slot + 1])
 			continue;
 
-		if (physmap[map_slot] <= pa &&
-		    physmap[map_slot + 1] > pa)
-			break;
+		start = physmap[map_slot];
+		end = physmap[map_slot + 1];
+		if (start <= kernstart && end > kernstart)
+			end = kernstart;
 
-		phys_avail[avail_slot] = physmap[map_slot];
-		phys_avail[avail_slot + 1] = physmap[map_slot + 1];
-		physmem += (phys_avail[avail_slot + 1] -
-		    phys_avail[avail_slot]) >> PAGE_SHIFT;
-		avail_slot += 2;
-	}
-
-	/* Add the memory before the kernel */
-	if (physmap[avail_slot] < pa) {
-		phys_avail[avail_slot] = physmap[map_slot];
-		phys_avail[avail_slot + 1] = pa;
-		physmem += (phys_avail[avail_slot + 1] -
-		    phys_avail[avail_slot]) >> PAGE_SHIFT;
+		phys_avail[avail_slot] = start;
+		phys_avail[avail_slot + 1] = end;
+		physmem += (end - start) >> PAGE_SHIFT;
 		avail_slot += 2;
 	}
 	used_map_slot = map_slot;
@@ -630,22 +617,16 @@
 
 	KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
 
 	/* Find how many pages we have mapped */
-	for (; l2_slot < Ln_ENTRIES; l2_slot++) {
+	for (va = KERNBASE; l2_slot < Ln_ENTRIES; l2_slot++, va += L2_SIZE) {
 		if ((l2[l2_slot] & PTE_V) == 0)
 			break;
 
-		/* Check locore used L2 superpages */
 		KASSERT((l2[l2_slot] & PTE_RX) != 0,
 		    ("Invalid bootstrap L2 table"));
-
-		va += L2_SIZE;
-		pa += L2_SIZE;
 	}
 
-	va = roundup2(va, L2_SIZE);
-	freemempos = KERNBASE + kernlen;
-	freemempos = roundup2(freemempos, PAGE_SIZE);
+	freemempos = roundup2(KERNBASE + kernlen, PAGE_SIZE);
 
 	/* Create the l3 tables for the early devmap */
 	freemempos = pmap_bootstrap_l3(l1pt,
@@ -672,30 +653,25 @@
 
 	pa = pmap_early_vtophys(l1pt, freemempos);
 
-	/* Finish initialising physmap */
+	/* Finish initialising phys_avail. */
 	map_slot = used_map_slot;
-	for (; avail_slot < (PHYS_AVAIL_SIZE - 2) &&
-	    map_slot < (physmap_idx * 2); map_slot += 2) {
-		if (physmap[map_slot] == physmap[map_slot + 1]) {
+	for (; avail_slot < PHYS_AVAIL_SIZE - 2 &&
+	    map_slot < physmap_idx * 2; map_slot += 2) {
+		if (physmap[map_slot] == physmap[map_slot + 1])
			continue;
-		}
 
 		/* Have we used the current range? */
-		if (physmap[map_slot + 1] <= pa) {
+		if (physmap[map_slot + 1] <= pa)
 			continue;
-		}
 
 		/* Do we need to split the entry? */
-		if (physmap[map_slot] < pa) {
+		if (physmap[map_slot] < pa)
 			phys_avail[avail_slot] = pa;
-			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
-		} else {
+		else
 			phys_avail[avail_slot] = physmap[map_slot];
-			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
-		}
+		phys_avail[avail_slot + 1] = physmap[map_slot + 1];
 		physmem += (phys_avail[avail_slot + 1] -
 		    phys_avail[avail_slot]) >> PAGE_SHIFT;
-
 		avail_slot += 2;
 	}
 	phys_avail[avail_slot] = 0;
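
For review purposes, below is a small standalone sketch of what the two phys_avail loops compute after this change. It is not kernel code: PHYS_AVAIL_SIZE, PAGE_SHIFT, and every address in it are hypothetical stand-ins, and the variable pa stands in for the value returned by pmap_early_vtophys(l1pt, freemempos), i.e. the first free physical address past the kernel's early allocations.

/*
 * Standalone sketch (not kernel code) of the phys_avail construction
 * in the patched pmap_bootstrap().  PHYS_AVAIL_SIZE, PAGE_SHIFT and
 * all sample addresses are hypothetical stand-ins.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	12
#define	PHYS_AVAIL_SIZE	10

int
main(void)
{
	/* Sorted (start, end) pairs; the kernel sits in the second one. */
	uint64_t physmap[] = {
		0x80000000, 0x80100000,		/* free range below kernel */
		0x80200000, 0xc0000000,		/* range holding the kernel */
	};
	uint64_t phys_avail[PHYS_AVAIL_SIZE] = { 0 };
	uint64_t kernstart = 0x80200000;	/* hypothetical load address */
	uint64_t pa = 0x80600000;	/* end of early kernel allocations */
	uint64_t physmem = 0, start, end;
	int physmap_idx = 2, avail_slot = 0, map_slot = 0, i;

	/* Pass 1: copy ranges below the kernel, clipping at kernstart. */
	for (; map_slot < physmap_idx * 2 && physmap[map_slot] < kernstart;
	    map_slot += 2) {
		if (physmap[map_slot] == physmap[map_slot + 1])
			continue;
		start = physmap[map_slot];
		end = physmap[map_slot + 1];
		if (start <= kernstart && end > kernstart)
			end = kernstart;
		phys_avail[avail_slot] = start;
		phys_avail[avail_slot + 1] = end;
		physmem += (end - start) >> PAGE_SHIFT;
		avail_slot += 2;
	}

	/* Pass 2: remaining ranges, splitting the one that contains pa. */
	for (; avail_slot < PHYS_AVAIL_SIZE - 2 &&
	    map_slot < physmap_idx * 2; map_slot += 2) {
		if (physmap[map_slot] == physmap[map_slot + 1])
			continue;
		if (physmap[map_slot + 1] <= pa)	/* already used? */
			continue;
		if (physmap[map_slot] < pa)		/* split the entry? */
			phys_avail[avail_slot] = pa;
		else
			phys_avail[avail_slot] = physmap[map_slot];
		phys_avail[avail_slot + 1] = physmap[map_slot + 1];
		physmem += (phys_avail[avail_slot + 1] -
		    phys_avail[avail_slot]) >> PAGE_SHIFT;
		avail_slot += 2;
	}

	for (i = 0; i < avail_slot; i += 2)
		printf("avail: 0x%" PRIx64 "-0x%" PRIx64 "\n",
		    phys_avail[i], phys_avail[i + 1]);
	printf("physmem: %" PRIu64 " pages\n", physmem);
	return (0);
}

With these inputs the sketch prints the range below the kernel (0x80000000-0x80100000) and the range above the early allocations (0x80600000-0xc0000000): the window [kernstart, pa) covering the kernel image and its bootstrap allocations is withheld from phys_avail, which is the behaviour the rewritten loops preserve while dropping the kern_delta arithmetic.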