diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5026,12 +5026,13 @@
 	npdpg = howmany(size, NBPDP);
 	npde = size / NBPDR;
 
-	dummypa = vm_phys_early_alloc(-1, PAGE_SIZE);
+	dummypa = vm_phys_early_alloc(PAGE_SIZE, -1);
 	pagezero((void *)PHYS_TO_DMAP(dummypa));
 
-	dummypt = vm_phys_early_alloc(-1, PAGE_SIZE);
+	dummypt = vm_phys_early_alloc(PAGE_SIZE, -1);
 	pagezero((void *)PHYS_TO_DMAP(dummypt));
-	dummypd = vm_phys_early_alloc(-1, PAGE_SIZE * npdpg);
+
+	dummypd = vm_phys_early_alloc(PAGE_SIZE * npdpg, -1);
 	for (i = 0; i < npdpg; i++)
 		pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));
 
@@ -5090,7 +5091,7 @@
 		domain = vm_phys_domain(ptoa(pfn));
 		pdpe = pmap_pdpe(kernel_pmap, va);
 		if ((*pdpe & X86_PG_V) == 0) {
-			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
+			pa = vm_phys_early_alloc(PAGE_SIZE, domain);
 			dump_add_page(pa);
 			pagezero((void *)PHYS_TO_DMAP(pa));
 			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
@@ -5099,7 +5100,7 @@
 		pde = pmap_pdpe_to_pde(pdpe, va);
 		if ((*pde & X86_PG_V) != 0)
 			panic("Unexpected pde");
-		pa = vm_phys_early_alloc(domain, NBPDR);
+		pa = vm_phys_early_alloc(NBPDR, domain);
 		for (i = 0; i < NPDEPG; i++)
 			dump_add_page(pa + i * PAGE_SIZE);
 		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -3442,7 +3442,7 @@
 	/* Short-circuit single-domain systems. */
 	if (vm_ndomains == 1) {
 		size = round_page(pages * sizeof(struct vm_page));
-		pa = vm_phys_early_alloc(0, size);
+		pa = vm_phys_early_alloc(size, 0);
 		vm_page_base = moea64_map(&vm_page_base,
 		    pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
 		vm_page_array_size = pages;
@@ -3484,7 +3484,7 @@
 		size = round_page(size * sizeof(struct vm_page));
 		needed = size;
 		size = roundup2(size, moea64_large_page_size);
-		pa = vm_phys_early_alloc(i, size);
+		pa = vm_phys_early_alloc(size, i);
 		vm_page_array_size += size / sizeof(struct vm_page);
 		moea64_map_range(va, pa, size >> PAGE_SHIFT);
 		/* Scoot up domain 0, to reduce the domain page overlap. */
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -6438,7 +6438,7 @@
 	start = VM_MIN_KERNEL_ADDRESS;
 	end = start + pages * sizeof(struct vm_page);
 
-	pa = vm_phys_early_alloc(-1, end - start);
+	pa = vm_phys_early_alloc(end - start, -1);
 
 	start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
 #ifdef notyet
@@ -6448,7 +6448,7 @@
 		domain = vm_phys_domain(ptoa(pfn));
 		l2e = pmap_pml2e(kernel_pmap, va);
 		if ((be64toh(*l2e) & PG_V) == 0) {
-			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
+			pa = vm_phys_early_alloc(PAGE_SIZE, domain);
 			dump_add_page(pa);
 			pagezero(PHYS_TO_DMAP(pa));
 			pde_store(l2e, (pml2_entry_t)pa);
@@ -6456,7 +6456,7 @@
 		pde = pmap_l2e_to_l3e(l2e, va);
 		if ((be64toh(*pde) & PG_V) != 0)
 			panic("Unexpected pde %p", pde);
-		pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
+		pa = vm_phys_early_alloc(L3_PAGE_SIZE, domain);
 		for (i = 0; i < NPDEPG; i++)
 			dump_add_page(pa + i * PAGE_SIZE);
 		newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -81,7 +81,7 @@
 bool vm_phys_unfree_page(vm_paddr_t pa);
 int vm_phys_mem_affinity(int f, int t);
 void vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end);
-vm_paddr_t vm_phys_early_alloc(int domain, size_t alloc_size);
+vm_paddr_t vm_phys_early_alloc(size_t alloc_size, int domain);
 void vm_phys_early_startup(void);
 int vm_phys_avail_largest(void);
 vm_paddr_t vm_phys_avail_size(int i);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -1926,7 +1926,7 @@
  * allocator is bootstrapped.
  */
 vm_paddr_t
-vm_phys_early_alloc(int domain, size_t alloc_size)
+vm_phys_early_alloc(size_t alloc_size, int domain)
 {
 	int i, biggestone;
 	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align_off;
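
The change is mechanical for every caller: the allocation size moves to the first argument and the NUMA domain to the second, and judging from the callers updated above, -1 keeps its meaning of "no specific domain". A minimal before/after sketch, reusing only names that appear in the hunks above (the local declarations are illustrative, not taken from any one caller):

	vm_paddr_t pa;
	int domain;

	/* Before this change: domain first, size second. */
	pa = vm_phys_early_alloc(domain, NBPDR);

	/*
	 * After this change: size first, domain second; -1 is still used
	 * where no specific domain is wanted, as in the dummy-page setup.
	 */
	pa = vm_phys_early_alloc(NBPDR, domain);
	pa = vm_phys_early_alloc(PAGE_SIZE, -1);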