diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2178,12 +2178,47 @@
 	return (m);
 }
 
+static vm_page_t
+vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
+    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
+{
+	struct vm_domain *vmd;
+	vm_page_t m_ret;
+
+	vmd = VM_DOMAIN(domain);
+	if (!vm_domain_allocate(vmd, req, npages))
+		return (NULL);
+#if VM_NRESERVLEVEL > 0
+again:
+#endif
+	/*
+	 * Try to allocate the pages from the free page queues.
+	 */
+	vm_domain_free_lock(vmd);
+	m_ret = vm_phys_alloc_contig(domain, npages, low, high,
+	    alignment, boundary);
+	vm_domain_free_unlock(vmd);
+	if (m_ret != NULL)
+		return (m_ret);
+#if VM_NRESERVLEVEL > 0
+	/*
+	 * Try to break a reservation to replenish free page queues
+	 * in a way that allows the allocation to succeed.
+	 */
+	if ((req & VM_ALLOC_NORECLAIM) == 0 &&
+	    vm_reserv_reclaim_contig(domain, npages, low,
+	    high, alignment, boundary))
+		goto again;
+#endif
+	vm_domain_freecnt_inc(vmd, npages);
+	return (m_ret);
+}
+
 vm_page_t
 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	struct vm_domain *vmd;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
@@ -2210,45 +2245,23 @@
 	 * Can we allocate the pages without the number of free pages falling
 	 * below the lower bound for the allocation class?
 	 */
-	m_ret = NULL;
-again:
+	for (;;) {
 #if VM_NRESERVLEVEL > 0
-	/*
-	 * Can we allocate the pages from a reservation?
-	 */
-	if (vm_object_reserv(object) &&
-	    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
-	    mpred, npages, low, high, alignment, boundary)) != NULL) {
-		goto found;
-	}
-#endif
-	vmd = VM_DOMAIN(domain);
-	if (vm_domain_allocate(vmd, req, npages)) {
 		/*
-		 * allocate them from the free page queues.
+		 * Can we allocate the pages from a reservation?
 		 */
-		vm_domain_free_lock(vmd);
-		m_ret = vm_phys_alloc_contig(domain, npages, low, high,
-		    alignment, boundary);
-		vm_domain_free_unlock(vmd);
-		if (m_ret == NULL) {
-			vm_domain_freecnt_inc(vmd, npages);
-#if VM_NRESERVLEVEL > 0
-			if ((req & VM_ALLOC_NORECLAIM) == 0 &&
-			    vm_reserv_reclaim_contig(domain, npages, low,
-			    high, alignment, boundary))
-				goto again;
-#endif
+		if (vm_object_reserv(object) &&
+		    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
+		    mpred, npages, low, high, alignment, boundary)) != NULL) {
+			break;
 		}
-	}
-	if (m_ret == NULL) {
-		if (vm_domain_alloc_fail(vmd, object, req))
-			goto again;
-		return (NULL);
-	}
-#if VM_NRESERVLEVEL > 0
-found:
 #endif
+		if ((m_ret = vm_page_find_contig_domain(domain, req, npages,
+		    low, high, alignment, boundary)) != NULL)
+			break;
+		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req))
+			return (NULL);
+	}
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		vm_page_dequeue(m);
 		vm_page_alloc_check(m);
@@ -2462,7 +2475,6 @@
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
-	struct vm_domain *vmd;
 	vm_page_t m, m_ret;
 	u_int flags;
 
@@ -2477,31 +2489,10 @@
 	    ("invalid request %#x", req));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
-	m_ret = NULL;
-again:
-	vmd = VM_DOMAIN(domain);
-	if (vm_domain_allocate(vmd, req, npages)) {
-		/*
-		 * allocate them from the free page queues.
-		 */
-		vm_domain_free_lock(vmd);
-		m_ret = vm_phys_alloc_contig(domain, npages, low, high,
-		    alignment, boundary);
-		vm_domain_free_unlock(vmd);
-		if (m_ret == NULL) {
-			vm_domain_freecnt_inc(vmd, npages);
-#if VM_NRESERVLEVEL > 0
-			if ((req & VM_ALLOC_NORECLAIM) == 0 &&
-			    vm_reserv_reclaim_contig(domain, npages, low,
-			    high, alignment, boundary))
-				goto again;
-#endif
-		}
-	}
-	if (m_ret == NULL) {
-		if (vm_domain_alloc_fail(vmd, NULL, req))
-			goto again;
-		return (NULL);
+	while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
+	    low, high, alignment, boundary)) == NULL) {
+		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
+			return (NULL);
 	}
 
 	/*
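For readers skimming the patch, here is a deliberately simplified, user-space sketch of the retry pattern that the new helper factors out of both callers. Every toy_* name below is a hypothetical stand-in, not the kernel API, and the counters ignore locking, alignment, and boundary constraints; the sketch only assumes the accounting visible in the diff: the domain's free-page count is debited up front, a failed queue allocation may be retried after breaking a reservation, and the count is credited back only when the helper finally gives up.

/*
 * Illustrative sketch only -- not FreeBSD kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
	long	free_count;	/* pages not yet promised to a caller */
	long	reserv_free;	/* free pages parked inside reservations */
	long	queue_free;	/* free pages sitting in the free queues */
};

/* Stand-in for vm_domain_allocate(): debit the free count or fail. */
static bool
toy_reserve(struct toy_domain *d, long npages)
{
	if (d->free_count < npages)
		return (false);
	d->free_count -= npages;
	return (true);
}

/* Stand-in for vm_domain_freecnt_inc(): credit the free count back. */
static void
toy_release(struct toy_domain *d, long npages)
{
	d->free_count += npages;
}

/* Stand-in for vm_phys_alloc_contig(): succeed only if the queues can. */
static bool
toy_phys_alloc(struct toy_domain *d, long npages)
{
	if (d->queue_free < npages)
		return (false);
	d->queue_free -= npages;
	return (true);
}

/* Stand-in for vm_reserv_reclaim_contig(): move reserved pages to the queues. */
static bool
toy_reclaim(struct toy_domain *d, long npages)
{
	if (d->reserv_free < npages)
		return (false);
	d->reserv_free -= npages;
	d->queue_free += npages;
	return (true);
}

/* The retry pattern the helper factors out of both callers. */
static bool
toy_find_contig(struct toy_domain *d, long npages, bool allow_reclaim)
{
	if (!toy_reserve(d, npages))
		return (false);
again:
	if (toy_phys_alloc(d, npages))
		return (true);
	if (allow_reclaim && toy_reclaim(d, npages))
		goto again;
	toy_release(d, npages);	/* give the count back only on final failure */
	return (false);
}

int
main(void)
{
	struct toy_domain d = { .free_count = 8, .reserv_free = 6, .queue_free = 2 };

	/* The first call must break a "reservation"; the second cannot proceed. */
	printf("alloc 4 -> %d\n", toy_find_contig(&d, 4, true));
	printf("alloc 8 -> %d\n", toy_find_contig(&d, 8, true));
	return (0);
}

Holding the debit across the reclaim-and-retry step is what lets both vm_page_alloc_contig_domain() and vm_page_alloc_noobj_contig_domain() shrink their allocation loops to a single call to the helper plus a vm_domain_alloc_fail() check.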