Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -69,6 +69,7 @@
 	vm_paddr_t	start;
 	vm_paddr_t	end;
 	vm_page_t	first_page;
+	int		first_superpage;
 	int		domain;
 	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
 };
@@ -96,6 +97,7 @@
 void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
+int vm_phys_paddr_to_superpage_index(vm_paddr_t pa);
 void vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
     int *locality);
 vm_page_t vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low,
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -72,6 +72,11 @@
 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, "Too many physsegs.");
 
+#if VM_NRESERVLEVEL > 0
+#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
+#define	VM_LEVEL_0_SIZE		((vm_paddr_t)1 << VM_LEVEL_0_SHIFT)
+#endif
+
 #ifdef NUMA
 struct mem_affinity __read_mostly *mem_affinity;
 int __read_mostly *mem_locality;
@@ -489,6 +494,10 @@
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
+#if VM_NRESERVLEVEL > 0
+	vm_pindex_t l0_start, l0_end, l0_end_prev;
+	u_long nl0;
+#endif
 	u_long npages;
 	int dom, flind, freelist, oind, pind, segind;
@@ -543,6 +552,10 @@
 	 */
#ifdef VM_PHYSSEG_SPARSE
 	npages = 0;
+#endif
+#if VM_NRESERVLEVEL > 0
+	nl0 = 0;
+	l0_end_prev = 0;
 #endif
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
@@ -552,6 +565,17 @@
 #else
 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
 #endif
+#if VM_NRESERVLEVEL > 0
+		l0_start = seg->start >> VM_LEVEL_0_SHIFT;
+		l0_end = roundup2(seg->end, VM_LEVEL_0_SIZE) >> VM_LEVEL_0_SHIFT;
+		seg->first_superpage = nl0;
+		nl0 += l0_end - l0_start;
+		if (l0_end_prev > l0_start) {
+			seg->first_superpage--;
+			nl0--;
+		}
+		l0_end_prev = l0_end;
+#endif
 #ifdef VM_FREELIST_LOWMEM
 		if (seg->end <= VM_LOWMEM_BOUNDARY) {
 			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
@@ -910,6 +934,24 @@
 	return (NULL);
 }
 
+#if VM_NRESERVLEVEL > 0
+int
+vm_phys_paddr_to_superpage_index(vm_paddr_t pa)
+{
+	struct vm_phys_seg *seg;
+	int segind;
+
+	for (segind = 0; segind < vm_phys_nsegs; segind++) {
+		seg = &vm_phys_segs[segind];
+		if (pa >= seg->start && pa < seg->end)
+			return (seg->first_superpage +
+			    (pa >> VM_LEVEL_0_SHIFT) -
+			    (seg->start >> VM_LEVEL_0_SHIFT));
+	}
+	panic("pa %#jx not within vm_phys_segs", (uintmax_t)pa);
+}
+#endif
+
 vm_page_t
 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
 {
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -233,6 +233,8 @@
  */
 static vm_reserv_t vm_reserv_array;
 
+#define	PHYS_TO_VM_RESERV(pa)	(&vm_reserv_array[vm_phys_paddr_to_superpage_index(pa)])
+
 /*
  * The per-domain partially populated reservation queues
  *
@@ -335,7 +337,7 @@
 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
 		while (paddr + VM_LEVEL_0_SIZE > paddr &&
 		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
-			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
+			rv = PHYS_TO_VM_RESERV(paddr);
 			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
 			paddr += VM_LEVEL_0_SIZE;
 		}
@@ -497,7 +499,7 @@
 static vm_reserv_t
 vm_reserv_from_page(vm_page_t m)
 {
 
-	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
+	return (PHYS_TO_VM_RESERV(VM_PAGE_TO_PHYS(m)));
 }
 
@@ -1065,7 +1067,7 @@
 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
 		while (paddr + VM_LEVEL_0_SIZE > paddr &&
 		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
-			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
+			rv = PHYS_TO_VM_RESERV(paddr);
 			rv->pages = PHYS_TO_VM_PAGE(paddr);
 			rv->domain = seg->domain;
 			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
@@ -1400,20 +1402,19 @@
 vm_paddr_t
 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
 {
-	vm_paddr_t new_end, high_water;
+	vm_paddr_t new_end;
 	size_t size;
-	int i;
+	int count, i;
 
-	high_water = phys_avail[1];
+	count = 0;
 	for (i = 0; i < vm_phys_nsegs; i++) {
-		if (vm_phys_segs[i].end > high_water)
-			high_water = vm_phys_segs[i].end;
+		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
+		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
 	}
-	/* Skip the first chunk.  It is already accounted for. */
-	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
-		if (phys_avail[i + 1] > high_water)
-			high_water = phys_avail[i + 1];
+	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
+		    phys_avail[i] / VM_LEVEL_0_SIZE;
 	}
 
 	/*
 	 * Calculate the size (in bytes) of the reservation array.  Round up
@@ -1423,7 +1424,7 @@
 	 * number of elements in the reservation array can be greater than the
 	 * number of superpages.
 	 */
-	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
+	size = count * sizeof(struct vm_reserv);
 
 	/*
 	 * Allocate and map the physical memory for the reservation array.  The