Index: sys/amd64/amd64/machdep.c
===================================================================
--- sys/amd64/amd64/machdep.c
+++ sys/amd64/amd64/machdep.c
@@ -1223,7 +1223,7 @@
 	 * Tell the physical memory allocator about pages used to store
 	 * the kernel and preloaded data.  See kmem_bootstrap_free().
 	 */
-	vm_phys_add_seg((vm_paddr_t)kernphys, trunc_page(first));
+	vm_phys_early_add_seg((vm_paddr_t)kernphys, trunc_page(first));
 
 	bzero(physmap, sizeof(physmap));
 	physmap_idx = 0;
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -1700,7 +1700,7 @@
 	 * are required for promotion of the corresponding kernel virtual
 	 * addresses to superpage mappings.
 	 */
-	vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
 
 	/*
 	 * Account for the virtual addresses mapped by create_pagetables().
Index: sys/i386/i386/machdep.c
===================================================================
--- sys/i386/i386/machdep.c
+++ sys/i386/i386/machdep.c
@@ -1828,7 +1828,7 @@
 	 * Tell the physical memory allocator about pages used to store
 	 * the kernel and preloaded data.  See kmem_bootstrap_free().
 	 */
-	vm_phys_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));
+	vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));
 
 	TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow);
 	TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow);
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -633,7 +633,7 @@
 	 * are required for promotion of the corresponding kernel virtual
 	 * addresses to superpage mappings.
 	 */
-	vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
 
 	/*
 	 * Initialize the first available kernel virtual address.
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -2810,6 +2810,7 @@
 	size_t ksize, zsize, size;
 	uma_keg_t masterkeg;
 	uintptr_t m;
+	int domain;
 	uint8_t pflag;
 
 	bootstart = bootmem = virtual_avail;
@@ -2827,7 +2828,12 @@
 
 	/* Allocate the zone of zones, zone of kegs, and zone of zones keg. */
 	size = (zsize * 2) + ksize;
-	m = (uintptr_t)startup_alloc(NULL, size, 0, &pflag, M_NOWAIT | M_ZERO);
+	for (domain = 0; domain < vm_ndomains; domain++) {
+		m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag,
+		    M_NOWAIT | M_ZERO);
+		if (m != 0)
+			break;
+	}
 	zones = (uma_zone_t)m;
 	m += zsize;
 	kegs = (uma_zone_t)m;
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -103,6 +103,7 @@
 void vm_phys_set_pool(int pool, vm_page_t m, int order);
 boolean_t vm_phys_unfree_page(vm_page_t m);
 int vm_phys_mem_affinity(int f, int t);
+void vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end);
 vm_paddr_t vm_phys_early_alloc(int domain, size_t alloc_size);
 void vm_phys_early_startup(void);
 int vm_phys_avail_largest(void);
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -82,6 +82,8 @@
 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
 int __read_mostly vm_phys_nsegs;
+static struct vm_phys_seg vm_phys_early_segs[8];
+static int vm_phys_early_nsegs;
 
 struct vm_phys_fictitious_seg;
 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
@@ -1611,6 +1613,21 @@
 	return (0);
 }
 
+void
+vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
+{
+	struct vm_phys_seg *seg;
+
+	if (vm_phys_early_nsegs == -1)
+		panic("%s: called after initialization", __func__);
+	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
+		panic("%s: ran out of early segments", __func__);
+
+	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
+	seg->start = start;
+	seg->end = end;
+}
+
 /*
  * This routine allocates NUMA node specific memory before the page
  * allocator is bootstrapped.
@@ -1699,6 +1716,7 @@
 void
 vm_phys_early_startup(void)
 {
+	struct vm_phys_seg *seg;
 	int i;
 
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
@@ -1723,6 +1741,12 @@
 		}
 	}
 #endif
+
+	for (i = 0; i < vm_phys_early_nsegs; i++) {
+		seg = &vm_phys_early_segs[i];
+		vm_phys_add_seg(seg->start, seg->end);
+	}
+	vm_phys_early_nsegs = -1;
 }
 
 #ifdef DDB
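The vm_phys changes amount to a record-then-replay pattern: callers may register segments via vm_phys_early_add_seg() before the physical memory allocator is initialized, vm_phys_early_startup() later replays them into vm_phys_add_seg(), and the counter is poisoned to -1 so a late registration panics. Below is a minimal userland sketch of that pattern for reference; the names (early_add_seg, early_startup, MAX_EARLY_SEGS) and the printf standing in for vm_phys_add_seg() are illustrative only and are not part of this patch.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;		/* stand-in for vm_paddr_t */

struct seg {
	paddr_t start;
	paddr_t end;
};

#define	MAX_EARLY_SEGS	8

static struct seg early_segs[MAX_EARLY_SEGS];
static int early_nsegs;

/* Record a segment before the real allocator exists. */
static void
early_add_seg(paddr_t start, paddr_t end)
{
	assert(early_nsegs != -1);		/* -1 == already initialized */
	assert(early_nsegs < MAX_EARLY_SEGS);	/* fixed-size early array */

	early_segs[early_nsegs].start = start;
	early_segs[early_nsegs].end = end;
	early_nsegs++;
}

/* Stand-in for vm_phys_add_seg(): the real registration. */
static void
add_seg(paddr_t start, paddr_t end)
{
	printf("registering segment [%#jx, %#jx)\n",
	    (uintmax_t)start, (uintmax_t)end);
}

/* Replay the recorded segments, then poison the counter. */
static void
early_startup(void)
{
	for (int i = 0; i < early_nsegs; i++)
		add_seg(early_segs[i].start, early_segs[i].end);
	early_nsegs = -1;
}

int
main(void)
{
	early_add_seg(0x200000, 0x400000);
	early_add_seg(0x1000000, 0x1200000);
	early_startup();
	return (0);
}
```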