Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -2043,7 +2043,7 @@
  *      VM_ALLOC_ZERO           prefer a zeroed page
  */
 vm_page_t
-vm_page_alloc_freelist(int flind, int req)
+vm_page_alloc_freelist(int freelist, int req)
 {
         struct vm_domain_iterator vi;
         vm_page_t m;
@@ -2056,7 +2056,7 @@
         while (vm_domain_iterator_run(&vi, &domain) == 0) {
                 if (vm_domain_iterator_isdone(&vi))
                         req |= wait;
-                m = vm_page_alloc_freelist_domain(domain, flind, req);
+                m = vm_page_alloc_freelist_domain(domain, freelist, req);
                 if (m != NULL)
                         break;
         }
@@ -2066,7 +2066,7 @@
 }

 vm_page_t
-vm_page_alloc_freelist_domain(int domain, int flind, int req)
+vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
         vm_page_t m;
         u_int flags, free_count;
@@ -2090,7 +2090,7 @@
             vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
             (req_class == VM_ALLOC_INTERRUPT &&
             vm_cnt.v_free_count > 0))
-                m = vm_phys_alloc_freelist_pages(domain, flind,
+                m = vm_phys_alloc_freelist_pages(domain, freelist,
                     VM_FREEPOOL_DIRECT, 0);
         if (m == NULL) {
                 if (vm_page_alloc_fail(NULL, req))
Index: head/sys/vm/vm_phys.c
===================================================================
--- head/sys/vm/vm_phys.c
+++ head/sys/vm/vm_phys.c
@@ -603,10 +603,10 @@
 vm_phys_alloc_pages(int domain, int pool, int order)
 {
         vm_page_t m;
-        int flind;
+        int freelist;

-        for (flind = 0; flind < vm_nfreelists; flind++) {
-                m = vm_phys_alloc_freelist_pages(domain, flind, pool, order);
+        for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+                m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
                 if (m != NULL)
                         return (m);
         }
@@ -621,22 +621,27 @@
  *
  * The free page queues must be locked.
  */
 vm_page_t
-vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order)
+vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 {
         struct vm_freelist *alt, *fl;
         vm_page_t m;
-        int oind, pind;
+        int oind, pind, flind;

         KASSERT(domain >= 0 && domain < vm_ndomains,
             ("vm_phys_alloc_freelist_pages: domain %d is out of range", domain));
-        KASSERT(flind < VM_NFREELIST,
+        KASSERT(freelist < VM_NFREELIST,
             ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
-            flind));
+            freelist));
         KASSERT(pool < VM_NFREEPOOL,
             ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
         KASSERT(order < VM_NFREEORDER,
             ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
+
+        flind = vm_freelist_to_flind[freelist];
+        /* Check if freelist is present */
+        if (flind < 0)
+                return (NULL);

         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         fl = &vm_phys_free_queues[domain][flind][pool][0];
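
As a usage sketch (not part of the patch): after this change the freelist argument to vm_page_alloc_freelist() and vm_phys_alloc_freelist_pages() is one of the platform's VM_FREELIST_* constants rather than an internal flind; vm_freelist_to_flind[] performs the translation, and a negative entry (a freelist not configured on the platform) now simply yields a NULL page. The helper name below and the choice of VM_FREELIST_DEFAULT are illustrative assumptions, not code from the tree.

/*
 * Illustrative sketch only: a hypothetical helper that wires and zeroes a
 * page taken from a specific freelist, named by a VM_FREELIST_* constant.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

static vm_page_t
example_alloc_from_freelist(void)
{
        vm_page_t m;

        /*
         * The first argument is a freelist index (VM_FREELIST_*); the
         * translation to an flind happens inside
         * vm_phys_alloc_freelist_pages() via vm_freelist_to_flind[].
         */
        m = vm_page_alloc_freelist(VM_FREELIST_DEFAULT,
            VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if (m == NULL)
                return (NULL);
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
        return (m);
}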