Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -2236,7 +2236,7 @@
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
-	int i, j, n;
+	int i, j, n, order;
 
 	vmd = arg;
 	/* Only import if we can bring in a full bucket. */
@@ -2243,13 +2243,13 @@
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
-	n = 64;	/* Starting stride, arbitrary. */
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i+=n) {
-		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
-		    MIN(n, cnt-i));
-		if (n == 0)
+		order = fls(cnt - i) - 1;
+		m = vm_phys_alloc_max(domain, VM_FREEPOOL_DEFAULT, &order);
+		if (m == NULL)
 			break;
+		n = 1 << order;
 		for (j = 0; j < n; j++)
 			store[i+j] = m++;
 	}
Index: vm/vm_phys.h
===================================================================
--- vm/vm_phys.h
+++ vm/vm_phys.h
@@ -77,8 +77,8 @@
 	    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
     int order);
+vm_page_t vm_phys_alloc_max(int domain, int pool, int *orderp);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
-int vm_phys_alloc_npages(int domain, int pool, vm_page_t *m, int cnt);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
Index: vm/vm_phys.c
===================================================================
--- vm/vm_phys.c
+++ vm/vm_phys.c
@@ -604,7 +604,99 @@
 	}
 }
 
+static int foo[VM_NFREEORDER];
+
+static int
+sysctl_debug_foo(SYSCTL_HANDLER_ARGS)
+{
+	struct sbuf sbuf;
+	int error, oind, savings;
+
+	error = sysctl_wire_old_buffer(req, 0);
+	if (error != 0)
+		return (error);
+	sbuf_new_for_sysctl(&sbuf, NULL, 1024, req);
+	sbuf_printf(&sbuf, "\nfoo:");
+	savings = 0;
+	for (oind = 0; oind < VM_NFREEORDER; oind++) {
+		sbuf_printf(&sbuf, " %d", foo[oind]);
+		savings += ((1 << oind) - 1) * foo[oind];
+	}
+	sbuf_printf(&sbuf, "\ncalls avoided: %d\n", savings);
+	error = sbuf_finish(&sbuf);
+	sbuf_delete(&sbuf);
+	return (error);
+}
+SYSCTL_OID(_debug, OID_AUTO, foo, CTLTYPE_STRING | CTLFLAG_RD,
+    NULL, 0, sysctl_debug_foo, "A", "");
+
 /*
+ * Allocate a contiguous, power-of-two-sized set of physical pages from the
+ * specified pool within the specified domain.  The size of the returned set
+ * will vary, depending on the current state of the free lists.  The caller
+ * specifies its maximum acceptable size through *orderp, and this function
+ * returns the actual allocated size through *orderp.
+ *
+ * Multiple, back-to-back calls to vm_phys_alloc_pages() can waste time in
+ * vm_phys_split_pages().  That wasted time can be avoided by calling this
+ * function instead.
+ *
+ * The free page queues for the specified domain must be locked.
+ */
+vm_page_t
+vm_phys_alloc_max(int domain, int pool, int *orderp)
+{
+	struct vm_freelist *alt, *fl;
+	vm_page_t m;
+	int flind, freelist, oind, pind;
+
+	KASSERT(domain >= 0 && domain < vm_ndomains,
+	    ("vm_phys_alloc_max: domain %d is out of range", domain));
+	KASSERT(pool < VM_NFREEPOOL,
+	    ("vm_phys_alloc_max: pool %d is out of range", pool));
+	KASSERT(*orderp < VM_NFREEORDER,
+	    ("vm_phys_alloc_max: *orderp %d is out of range", *orderp));
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		flind = vm_freelist_to_flind[freelist];
+		if (flind < 0)
+			continue;
+		fl = vm_phys_free_queues[domain][flind][pool];
+		for (oind = 0; oind < VM_NFREEORDER; oind++) {
+			m = TAILQ_FIRST(&fl[oind].pl);
+			if (m != NULL) {
+				vm_freelist_rem(fl, m, oind);
+				if (oind < *orderp)
+					*orderp = oind;
+				else
+					vm_phys_split_pages(m, oind, fl,
+					    *orderp);
+				foo[*orderp]++;
+				return (m);
+			}
+		}
+		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
+			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
+				alt = vm_phys_free_queues[domain][flind][pind];
+				m = TAILQ_FIRST(&alt[oind].pl);
+				if (m != NULL) {
+					vm_freelist_rem(alt, m, oind);
+					vm_phys_set_pool(pool, m, oind);
+					if (oind < *orderp)
+						*orderp = oind;
+					else
+						vm_phys_split_pages(m, oind,
+						    fl, *orderp);
+					foo[*orderp]++;
+					return (m);
+				}
+			}
+		}
+	}
+	return (NULL);
+}
+
+/*
  * Allocate a contiguous, power of two-sized set of physical pages
  * from the free lists.
  *
@@ -624,26 +716,6 @@
 	return (NULL);
 }
 
-int
-vm_phys_alloc_npages(int domain, int pool, vm_page_t *mp, int cnt)
-{
-	vm_page_t m;
-	int order, freelist;
-
-	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
-		for (order = fls(cnt) -1; order >= 0; order--) {
-			m = vm_phys_alloc_freelist_pages(domain, freelist,
-			    pool, order);
-			if (m != NULL) {
-				*mp = m;
-				return (1 << order);
-			}
-		}
-	}
-	*mp = NULL;
-	return (0);
-}
-
 /*
  * Allocate a contiguous, power of two-sized set of physical pages from the
  * specified free list.  The free list must be specified using one of the
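
A quick sanity check on the chunking arithmetic in the new vm_page_import()
loop, as a standalone userland sketch.  This is not kernel code: fls() here
is the FreeBSD libc version from <strings.h>, and the bucket size cnt = 100
is made up for illustration.  The sketch also assumes every request is
satisfied at the full order, whereas vm_phys_alloc_max() may shrink *orderp
when the free lists are fragmented.

#include <stdio.h>
#include <strings.h>	/* FreeBSD libc fls() */

int
main(void)
{
	int cnt, i, n, order;

	cnt = 100;	/* hypothetical bucket size */
	for (i = 0; i < cnt; i += n) {
		/*
		 * Largest power-of-two chunk that still fits in the
		 * remaining request; mirrors the vm_page_import() change.
		 */
		order = fls(cnt - i) - 1;
		n = 1 << order;	/* the real allocator may return less */
		printf("i = %3d, remainder = %3d -> order %d (%d pages)\n",
		    i, cnt - i, order, n);
	}
	return (0);
}

For cnt = 100 this yields chunks of 64, 32, and 4 pages, i.e. three
allocator calls instead of 100 order-0 allocations; that difference is what
the debug.foo sysctl above totals as "calls avoided" via
((1 << oind) - 1) * foo[oind].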