Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -2235,8 +2235,7 @@
 vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
-	vm_page_t m;
-	int i, j, n;
+	int i;
 
 	vmd = arg;
 	/* Only import if we can bring in a full bucket. */
@@ -2243,16 +2242,9 @@
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
-	n = 64;	/* Starting stride, arbitrary. */
 	vm_domain_free_lock(vmd);
-	for (i = 0; i < cnt; i+=n) {
-		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
-		    MIN(n, cnt-i));
-		if (n == 0)
-			break;
-		for (j = 0; j < n; j++)
-			store[i+j] = m++;
-	}
+	i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
+	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);
Index: vm/vm_phys.h
===================================================================
--- vm/vm_phys.h
+++ vm/vm_phys.h
@@ -77,8 +77,8 @@
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
     int order);
+int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t *ma);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
-int vm_phys_alloc_npages(int domain, int pool, vm_page_t *m, int cnt);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
Index: vm/vm_phys.c
===================================================================
--- vm/vm_phys.c
+++ vm/vm_phys.c
@@ -604,7 +604,79 @@
 	}
 }
 
 /*
+ * Try to allocate up to npages pages of the specified pool from the free
+ * lists of the given domain, storing them in the array ma.  Returns the
+ * number of pages allocated, which may be less than npages if the free
+ * lists are exhausted.
+ *
+ * The free page queues for the specified domain must be locked.
+ */
+int
+vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t *ma)
+{
+	struct vm_freelist *alt, *fl;
+	vm_page_t m;
+	int avail, end, flind, freelist, i, need, oind, pind;
+
+	KASSERT(domain >= 0 && domain < vm_ndomains,
+	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
+	KASSERT(pool < VM_NFREEPOOL,
+	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
+	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
+	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
+	i = 0;
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		flind = vm_freelist_to_flind[freelist];
+		if (flind < 0)
+			continue;
+		fl = vm_phys_free_queues[domain][flind][pool];
+		for (oind = 0; oind < VM_NFREEORDER; oind++) {
+			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
+				vm_freelist_rem(fl, m, oind);
+				avail = 1 << oind;
+				need = imin(npages - i, avail);
+				end = i + need;
+				while (i < end)
+					ma[i++] = m++;
+				if (need < avail) {
+					/*
+					 * Return the unneeded tail of the
+					 * last block to the free lists.
+					 */
+					vm_phys_free_contig(m, avail - need);
+					return (npages);
+				} else if (i == npages)
+					return (npages);
+			}
+		}
+		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
+			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
+				alt = vm_phys_free_queues[domain][flind][pind];
+				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
+				    NULL) {
+					vm_freelist_rem(alt, m, oind);
+					vm_phys_set_pool(pool, m, oind);
+					avail = 1 << oind;
+					need = imin(npages - i, avail);
+					end = i + need;
+					while (i < end)
+						ma[i++] = m++;
+					if (need < avail) {
+						vm_phys_free_contig(m, avail -
+						    need);
+						return (npages);
+					} else if (i == npages)
+						return (npages);
+				}
+			}
+		}
+	}
+	return (i);
+}
+
+/*
  * Allocate a contiguous, power of two-sized set of physical pages
  * from the free lists.
  *
@@ -624,26 +696,6 @@
 	return (NULL);
 }
-int
-vm_phys_alloc_npages(int domain, int pool, vm_page_t *mp, int cnt)
-{
-	vm_page_t m;
-	int order, freelist;
-
-	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
-		for (order = fls(cnt) -1; order >= 0; order--) {
-			m = vm_phys_alloc_freelist_pages(domain, freelist,
-			    pool, order);
-			if (m != NULL) {
-				*mp = m;
-				return (1 << order);
-			}
-		}
-	}
-	*mp = NULL;
-	return (0);
-}
-
 /*
  * Allocate a contiguous, power of two-sized set of physical pages from the
  * specified free list.  The free list must be specified using one of the