Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -69,6 +69,7 @@
 	vm_paddr_t	start;
 	vm_paddr_t	end;
 	vm_page_t	first_page;
+	vm_reserv_t	first_reserv;
 	int		domain;
 	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
 };
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -233,6 +233,26 @@
  */
 static vm_reserv_t vm_reserv_array;
 
+/*
+ * Returns the reservation structure for the superpage containing the given
+ * physical address.  Panics if the address lies within no vm_phys segment.
+ */
+static vm_reserv_t
+phys_to_reserv(vm_paddr_t pa)
+{
+	struct vm_phys_seg *seg;
+	int segind;
+
+	for (segind = 0; segind < vm_phys_nsegs; segind++) {
+		seg = &vm_phys_segs[segind];
+		if (pa >= seg->start && pa < seg->end)
+			return (seg->first_reserv +
+			    (pa >> VM_LEVEL_0_SHIFT) -
+			    (seg->start >> VM_LEVEL_0_SHIFT));
+	}
+	panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
+}
+
 /*
  * The per-domain partially populated reservation queues
  *
@@ -333,11 +353,13 @@
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
+		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
+		    (seg->start >> VM_LEVEL_0_SHIFT);
 		while (paddr + VM_LEVEL_0_SIZE > paddr &&
 		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
-			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
 			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
 			paddr += VM_LEVEL_0_SIZE;
+			rv++;
 		}
 	}
 	return (sysctl_handle_int(oidp, &fullpop, 0, req));
@@ -496,8 +518,11 @@
 static __inline vm_reserv_t
 vm_reserv_from_page(vm_page_t m)
 {
+	struct vm_phys_seg *seg;
 
-	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
+	seg = &vm_phys_segs[m->segind];
+	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
+	    (seg->start >> VM_LEVEL_0_SHIFT));
 }
 /*
  *
@@ -1052,7 +1077,7 @@
 {
 	vm_paddr_t paddr;
 	struct vm_phys_seg *seg;
-	struct vm_reserv *rv;
+	struct vm_reserv *rv, *first;
 	struct vm_reserv_domain *rvd;
 	int i, j, segind;
 
@@ -1060,17 +1085,23 @@
 	 * Initialize the reservation array.  Specifically, initialize the
 	 * "pages" field for every element that has an underlying superpage.
 	 */
+	first = vm_reserv_array;
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
+		seg->first_reserv = first;
 		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
+		rv = first + (paddr >> VM_LEVEL_0_SHIFT) -
+		    (seg->start >> VM_LEVEL_0_SHIFT);
 		while (paddr + VM_LEVEL_0_SIZE > paddr &&
 		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
-			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
 			rv->pages = PHYS_TO_VM_PAGE(paddr);
 			rv->domain = seg->domain;
 			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
 			paddr += VM_LEVEL_0_SIZE;
+			rv++;
 		}
+		first += howmany(seg->end, VM_LEVEL_0_SIZE) -
+		    seg->start / VM_LEVEL_0_SIZE;
 	}
 	for (i = 0; i < MAXMEMDOM; i++) {
 		rvd = &vm_rvd[i];
@@ -1400,30 +1431,29 @@
 vm_paddr_t
 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
 {
-	vm_paddr_t new_end, high_water;
+	vm_paddr_t new_end;
 	size_t size;
-	int i;
+	int count, i;
 
-	high_water = phys_avail[1];
+	count = 0;
 	for (i = 0; i < vm_phys_nsegs; i++) {
-		if (vm_phys_segs[i].end > high_water)
-			high_water = vm_phys_segs[i].end;
+		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
+		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
 	}
-	/* Skip the first chunk.  It is already accounted for. */
-	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
-		if (phys_avail[i + 1] > high_water)
-			high_water = phys_avail[i + 1];
+	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
+		    phys_avail[i] / VM_LEVEL_0_SIZE;
 	}
 
 	/*
-	 * Calculate the size (in bytes) of the reservation array.  Round up
-	 * from "high_water" because every small page is mapped to an element
-	 * in the reservation array based on its physical address.  Thus, the
-	 * number of elements in the reservation array can be greater than the
-	 * number of superpages.
+	 * Calculate the size (in bytes) of the reservation array.  Round up
+	 * at segment boundaries because every small page is mapped to an
+	 * element in the reservation array based on its physical address.
+	 * Thus, the number of elements in the reservation array can be
+	 * greater than the number of superpages.
 	 */
-	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
+	size = count * sizeof(struct vm_reserv);
 
 	/*
 	 * Allocate and map the physical memory for the reservation array.  The