Index: sys/vm/_vm_phys.h
===================================================================
--- sys/vm/_vm_phys.h
+++ sys/vm/_vm_phys.h
@@ -53,6 +53,8 @@
 	int lcnt;
 };
 
+typedef struct vm_freelist (*vm_freelist_tbl)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+
 struct vm_phys_seg {
 	vm_paddr_t start;
 	vm_paddr_t end;
@@ -64,7 +66,7 @@
 	void *md_first;
 #endif
 	int domain;
-	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+	vm_freelist_tbl free_queues;
 };
 
 extern struct vm_phys_seg vm_phys_segs[];
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -173,7 +173,7 @@
 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
     &vm_ndomains, 0, "Number of physical memory domains available.");
 
-static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
+static vm_page_t vm_phys_alloc_queues_contig(vm_freelist_tbl queues,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary);
 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
@@ -1367,6 +1367,7 @@
 	vm_paddr_t pa_end, pa_start;
 	vm_page_t m_run;
 	struct vm_phys_seg *seg;
+	vm_freelist_tbl prev_tbl;
 	int segind;
 
 	KASSERT(npages > 0, ("npages is 0"));
@@ -1375,6 +1376,7 @@
 	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	if (low >= high)
 		return (NULL);
+	prev_tbl = NULL;
 	m_run = NULL;
 	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
 		seg = &vm_phys_segs[segind];
@@ -1392,8 +1394,17 @@
 			pa_end = seg->end;
 		if (pa_end - pa_start < ptoa(npages))
 			continue;
-		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
-		    alignment, boundary);
+		/*
+		 * If a previous segment led to a search using
+		 * the same free lists as would this segment, then
+		 * we've actually already searched within this
+		 * too.  So skip it.
+		 */
+		if (seg->free_queues == prev_tbl)
+			continue;
+		prev_tbl = seg->free_queues;
+		m_run = vm_phys_alloc_queues_contig(prev_tbl, npages,
+		    low, high, alignment, boundary);
 		if (m_run != NULL)
 			break;
 	}
@@ -1401,13 +1412,14 @@
 }
 
 /*
- * Allocate a run of contiguous physical pages from the free list for the
- * specified segment.
+ * Allocate a run of contiguous physical pages from the specified free list
+ * table.
  */
 static vm_page_t
-vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
+vm_phys_alloc_queues_contig(vm_freelist_tbl queues, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
+	struct vm_phys_seg *seg;
 	struct vm_freelist *fl;
 	vm_paddr_t pa, pa_end, size;
 	vm_page_t m, m_ret;
@@ -1417,7 +1429,6 @@
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
-	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	/* Compute the queue that is the best fit for npages. */
 	order = flsl(npages - 1);
 	/* Search for a run satisfying the specified conditions. */
@@ -1425,7 +1436,7 @@
 	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
 	    oind++) {
 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
-			fl = (*seg->free_queues)[pind];
+			fl = (*queues)[pind];
 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
 				/*
 				 * Determine if the address range starting at pa
@@ -1451,8 +1462,8 @@
 				 * (without overflow in pa_end calculation)
 				 * and fits within the segment.
 				 */
-				if (pa_end < pa ||
-				    pa < seg->start || seg->end < pa_end)
+				seg = &vm_phys_segs[m_ret->segind];
+				if (pa_end < pa || seg->end < pa_end)
 					continue;
 
 				/*
@@ -1467,13 +1478,26 @@
 						goto done;
 				} while (VM_NFREEORDER - 1 == seg->first_page[
 				    atop(pa - seg->start)].order);
+
+				for (;;) {
+					m = &seg->first_page[
+					    atop(pa - seg->start)];
+					if (m->order == VM_NFREEORDER ||
+					    pa + (2 << (PAGE_SHIFT + m->order))
+					    <= pa_end)
+						break;
+					pa += 1 << (PAGE_SHIFT + m->order);
+					if (pa >= pa_end)
+						goto done;
+				}
 			}
 		}
 	}
 	return (NULL);
 done:
 	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
-		fl = (*seg->free_queues)[m->pool];
+		fl = (*queues)[m->pool];
+		oind = m->order;
 		vm_freelist_rem(fl, m, oind);
 		if (m->pool != VM_FREEPOOL_DEFAULT)
 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
@@ -1481,7 +1505,7 @@
 	/* Return excess pages to the free lists. */
 	npages_end = roundup2(npages, 1 << oind);
 	if (npages < npages_end) {
-		fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
+		fl = (*queues)[VM_FREEPOOL_DEFAULT];
 		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
 	}
 	return (m_ret);