diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2186,9 +2186,6 @@
 	vm_page_t m_ret;
 
 	vmd = VM_DOMAIN(domain);
-#if VM_NRESERVLEVEL > 0
-again:
-#endif
 	if (!vm_domain_allocate(vmd, req, npages))
 		return (NULL);
 	/*
@@ -2200,18 +2197,19 @@
 	vm_domain_free_unlock(vmd);
 	if (m_ret != NULL)
 		return (m_ret);
-	vm_domain_freecnt_inc(vmd, npages);
 #if VM_NRESERVLEVEL > 0
 	/*
-	 * Try to break a reservation to replenish free page queues
-	 * in a way that allows the allocation to succeed.
+	 * Try to break a reservation to allocate the pages.
	 */
-	if ((req & VM_ALLOC_NORECLAIM) == 0 &&
-	    vm_reserv_reclaim_contig(domain, npages, low,
-	    high, alignment, boundary))
-		goto again;
+	if ((req & VM_ALLOC_NORECLAIM) == 0) {
+		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
+		    high, alignment, boundary);
+		if (m_ret != NULL)
+			return (m_ret);
+	}
 #endif
-	return (m_ret);
+	vm_domain_freecnt_inc(vmd, npages);
+	return (NULL);
 }
 
 vm_page_t
diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -59,7 +59,7 @@
 bool vm_reserv_is_page_free(vm_page_t m);
 int vm_reserv_level(vm_page_t m);
 int vm_reserv_level_iffullpop(vm_page_t m);
-bool vm_reserv_reclaim_contig(int domain, u_long npages,
+vm_page_t vm_reserv_reclaim_contig(int domain, u_long npages,
 	    vm_paddr_t low, vm_paddr_t high, u_long alignment,
 	    vm_paddr_t boundary);
 bool vm_reserv_reclaim_inactive(int domain);
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -1312,12 +1312,13 @@
  * contiguous physical memory. If a satisfactory reservation is found, it is
- * broken. Returns true if a reservation is broken and false otherwise.
+ * broken. Returns the first page of the allocated run, or NULL otherwise.
  */
-bool
+vm_page_t
 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_reserv_queue *queue;
 	vm_paddr_t pa, size;
+	vm_page_t m_ret;
 	vm_reserv_t marker, rv, rvn;
 	int hi, lo, posn, ppn_align, ppn_bound;
 
@@ -1333,7 +1334,7 @@
 	 * no boundary-constrained allocation is possible.
 	 */
 	if (size > boundary)
-		return (false);
+		return (NULL);
 	marker = &vm_rvd[domain].marker;
 	queue = &vm_rvd[domain].partpop;
 	/*
@@ -1386,18 +1387,22 @@
 		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
 		    ppn_align, ppn_bound);
 		if (posn >= 0) {
-			pa = VM_PAGE_TO_PHYS(&rv->pages[posn]);
+			vm_reserv_domain_scan_unlock(domain);
+			/* Allocate requested space */
+			rv->popcnt += npages;
+			while (npages-- > 0)
+				popmap_set(rv->popmap, posn + npages);
+			vm_reserv_reclaim(rv);
+			vm_reserv_unlock(rv);
+			m_ret = &rv->pages[posn];
+			pa = VM_PAGE_TO_PHYS(m_ret);
 			KASSERT((pa & (alignment - 1)) == 0,
 			    ("%s: adjusted address does not align to %lx",
 			    __func__, alignment));
 			KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
 			    ("%s: adjusted address spans boundary to %jx",
 			    __func__, (uintmax_t)boundary));
-
-			vm_reserv_domain_scan_unlock(domain);
-			vm_reserv_reclaim(rv);
-			vm_reserv_unlock(rv);
-			return (true);
+			return (m_ret);
 		}
 		vm_reserv_domain_lock(domain);
 		rvn = TAILQ_NEXT(rv, partpopq);
@@ -1405,7 +1410,7 @@
 	}
 	vm_reserv_domain_unlock(domain);
 	vm_reserv_domain_scan_unlock(domain);
-	return (false);
+	return (NULL);
 }
 
 /*