diff --git a/sys/arm/nvidia/drm2/tegra_bo.c b/sys/arm/nvidia/drm2/tegra_bo.c
--- a/sys/arm/nvidia/drm2/tegra_bo.c
+++ b/sys/arm/nvidia/drm2/tegra_bo.c
@@ -95,7 +95,7 @@
     vm_page_t **ret_page)
 {
     vm_page_t m;
-    int tries, i;
+    int err, i, tries;
     vm_paddr_t low, high, boundary;
 
     low = 0;
@@ -107,9 +107,12 @@
         low, high, alignment, boundary, memattr);
     if (m == NULL) {
         if (tries < 3) {
-            if (!vm_page_reclaim_contig(0, npages, low, high,
-                alignment, boundary))
+            err = vm_page_reclaim_contig(0, npages, low, high,
+                alignment, boundary);
+            if (err == ENOMEM)
                 vm_wait(NULL);
+            else if (err != 0)
+                return (ENOMEM);
             tries++;
             goto retry;
         }
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -100,6 +100,7 @@
 linux_alloc_pages(gfp_t flags, unsigned int order)
 {
     struct page *page;
+    int err;
 
     if (PMAP_HAS_DMAP) {
         unsigned long npages = 1UL << order;
@@ -119,10 +120,12 @@
                 PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
             if (page == NULL) {
                 if (flags & M_WAITOK) {
-                    if (!vm_page_reclaim_contig(req,
-                        npages, 0, pmax, PAGE_SIZE, 0)) {
+                    err = vm_page_reclaim_contig(req,
+                        npages, 0, pmax, PAGE_SIZE, 0);
+                    if (err == ENOMEM)
                         vm_wait(NULL);
-                    }
+                    else if (err != 0)
+                        return (NULL);
                     flags &= ~M_WAITOK;
                     goto retry;
                 }
diff --git a/sys/dev/drm2/ttm/ttm_bo.c b/sys/dev/drm2/ttm/ttm_bo.c
--- a/sys/dev/drm2/ttm/ttm_bo.c
+++ b/sys/dev/drm2/ttm/ttm_bo.c
@@ -1498,8 +1498,8 @@
         VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
     if (unlikely(glob->dummy_read_page == NULL)) {
-        if (tries < 1 && vm_page_reclaim_contig(0, 1, 0,
-            VM_MAX_ADDRESS, PAGE_SIZE, 0)) {
+        if (tries < 1 && (vm_page_reclaim_contig(0, 1, 0,
+            VM_MAX_ADDRESS, PAGE_SIZE, 0) == 0)) {
             tries++;
             goto retry;
         }
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
--- a/sys/dev/drm2/ttm/ttm_page_alloc.c
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -158,16 +158,19 @@
 ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
 {
     vm_page_t p;
-    int tries;
+    int err, tries;
 
     for (tries = 0; ; tries++) {
         p = vm_page_alloc_noobj_contig(req, 1, 0, 0xffffffff,
             PAGE_SIZE, 0, memattr);
         if (p != NULL || tries > 2)
             return (p);
-        if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
-            PAGE_SIZE, 0))
+        err = vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
+            PAGE_SIZE, 0);
+        if (err == ENOMEM)
             vm_wait(NULL);
+        else if (err != 0)
+            return (NULL);
     }
 }
 
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -3160,8 +3160,9 @@
          * backlogs of buffers to be encrypted, leading to
          * surges of traffic and potential NIC output drops.
          */
-        if (!vm_page_reclaim_contig_domain_ext(domain, VM_ALLOC_NORMAL,
-            atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0, ktls_max_reclaim)) {
+        if (vm_page_reclaim_contig_domain_ext(domain, VM_ALLOC_NORMAL,
+            atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
+            ktls_max_reclaim) != 0) {
             vm_wait_domain(domain);
         } else {
             sc->reclaims += ktls_max_reclaim;
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -877,8 +877,9 @@
         }
         error = vm_page_reclaim_contig(aflags,
             pagesizes[psind] / PAGE_SIZE, 0, ~0,
-            pagesizes[psind], 0) ? 0 :
-            vm_wait_intr(object);
+            pagesizes[psind], 0);
+        if (error == ENOMEM)
+            error = vm_wait_intr(object);
         if (error != 0) {
             VM_OBJECT_WLOCK(object);
             return (error);
diff --git a/sys/vm/vm_domainset.h b/sys/vm/vm_domainset.h
--- a/sys/vm/vm_domainset.h
+++ b/sys/vm/vm_domainset.h
@@ -31,6 +31,7 @@
 struct vm_domainset_iter {
     struct domainset *di_domain;
     unsigned int *di_iter;
+    domainset_t di_valid_mask;
     vm_pindex_t di_offset;
     int di_flags;
     uint16_t di_policy;
@@ -47,6 +48,7 @@
     struct domainset *, int *, int *);
 void vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *,
     struct domainset_ref *, int *, int *);
+void vm_domainset_iter_ignore(struct vm_domainset_iter *, int);
 
 int vm_wait_doms(const domainset_t *, int mflags);
 
diff --git a/sys/vm/vm_domainset.c b/sys/vm/vm_domainset.c
--- a/sys/vm/vm_domainset.c
+++ b/sys/vm/vm_domainset.c
@@ -68,6 +68,7 @@
     di->di_domain = ds;
     di->di_iter = iter;
     di->di_policy = ds->ds_policy;
+    DOMAINSET_COPY(&ds->ds_mask, &di->di_valid_mask);
     if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
 #if VM_NRESERVLEVEL > 0
         if (vm_object_reserv(obj)) {
@@ -158,7 +159,7 @@
     switch (di->di_policy) {
     case DOMAINSET_POLICY_FIRSTTOUCH:
         *domain = PCPU_GET(domain);
-        if (DOMAINSET_ISSET(*domain, &di->di_domain->ds_mask)) {
+        if (DOMAINSET_ISSET(*domain, &di->di_valid_mask)) {
             /*
              * Add an extra iteration because we will visit the
              * current domain a second time in the rr iterator.
@@ -221,11 +222,14 @@
 vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
     int *domain)
 {
+    if (DOMAINSET_EMPTY(&di->di_valid_mask))
+        return (ENOMEM);
 
     /* If there are more domains to visit we run the iterator. */
     while (--di->di_n != 0) {
         vm_domainset_iter_next(di, domain);
-        if (!di->di_minskip || !vm_page_count_min_domain(*domain))
+        if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
+            (!di->di_minskip || !vm_page_count_min_domain(*domain)))
             return (0);
     }
 
@@ -243,7 +247,7 @@
     /* Wait for one of the domains to accumulate some free pages. */
     if (obj != NULL)
         VM_OBJECT_WUNLOCK(obj);
-    vm_wait_doms(&di->di_domain->ds_mask, 0);
+    vm_wait_doms(&di->di_valid_mask, 0);
     if (obj != NULL)
         VM_OBJECT_WLOCK(obj);
     if ((di->di_flags & VM_ALLOC_WAITFAIL) != 0)
@@ -288,11 +292,14 @@
 int
 vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
 {
+    if (DOMAINSET_EMPTY(&di->di_valid_mask))
+        return (ENOMEM);
 
     /* If there are more domains to visit we run the iterator. */
     while (--di->di_n != 0) {
         vm_domainset_iter_next(di, domain);
-        if (!di->di_minskip || !vm_page_count_min_domain(*domain))
+        if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
+            (!di->di_minskip || !vm_page_count_min_domain(*domain)))
             return (0);
     }
 
@@ -308,7 +315,7 @@
         return (ENOMEM);
 
     /* Wait for one of the domains to accumulate some free pages. */
-    vm_wait_doms(&di->di_domain->ds_mask, 0);
+    vm_wait_doms(&di->di_valid_mask, 0);
 
     /* Restart the search. */
     vm_domainset_iter_first(di, domain);
@@ -316,6 +323,12 @@
     return (0);
 }
 
+void
+vm_domainset_iter_ignore(struct vm_domainset_iter *di, int domain)
+{
+    DOMAINSET_CLR(domain, &di->di_valid_mask);
+}
+
 #else /* !NUMA */
 
 int
@@ -357,4 +370,10 @@
     *domain = 0;
 }
 
+void
+vm_domainset_iter_ignore(struct vm_domainset_iter *di __unused,
+    int domain __unused)
+{
+}
+
 #endif /* NUMA */
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -212,8 +212,8 @@
             break;
         VM_OBJECT_WUNLOCK(object);
-        if (!vm_page_reclaim_contig_domain(domain, pflags, npages,
-            low, high, alignment, boundary) && wait)
+        if ((vm_page_reclaim_contig_domain(domain, pflags, npages,
+            low, high, alignment, boundary) == ENOMEM) && wait)
             vm_wait_domain(domain);
         VM_OBJECT_WLOCK(object);
     }
@@ -286,8 +286,12 @@
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
 {
     struct vm_domainset_iter di;
+    vm_page_t bounds[2];
     void *addr;
     int domain;
+    int start_segind;
+
+    start_segind = -1;
 
     vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
     do {
@@ -295,6 +299,12 @@
             memattr);
         if (addr != NULL)
             break;
+        if (start_segind == -1)
+            start_segind = vm_phys_lookup_segind(low);
+        if (vm_phys_find_range(bounds, start_segind, domain,
+            atop(round_page(size)), low, high) == -1) {
+            vm_domainset_iter_ignore(&di, domain);
+        }
     } while (vm_domainset_iter_policy(&di, &domain) == 0);
 
     return (addr);
@@ -370,8 +380,12 @@
     vm_memattr_t memattr)
 {
     struct vm_domainset_iter di;
+    vm_page_t bounds[2];
     void *addr;
     int domain;
+    int start_segind;
+
+    start_segind = -1;
 
     vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
     do {
@@ -379,6 +393,12 @@
             alignment, boundary, memattr);
         if (addr != NULL)
             break;
+        if (start_segind == -1)
+            start_segind = vm_phys_lookup_segind(low);
+        if (vm_phys_find_range(bounds, start_segind, domain,
+            atop(round_page(size)), low, high) == -1) {
+            vm_domainset_iter_ignore(&di, domain);
+        }
     } while (vm_domainset_iter_policy(&di, &domain) == 0);
 
     return (addr);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -662,11 +662,11 @@
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
 void vm_page_readahead_finish(vm_page_t m);
-bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
+int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
-bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
+int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
-bool vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
+int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     int desired_runs);
 void vm_page_reference(vm_page_t m);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2172,8 +2172,12 @@
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
     struct vm_domainset_iter di;
+    vm_page_t bounds[2];
     vm_page_t m;
     int domain;
+    int start_segind;
+
+    start_segind = -1;
 
     vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
     do {
@@ -2181,6 +2185,12 @@
             npages, low, high, alignment, boundary, memattr);
         if (m != NULL)
             break;
+        if (start_segind == -1)
+            start_segind = vm_phys_lookup_segind(low);
+        if (vm_phys_find_range(bounds, start_segind, domain,
+            npages, low, high) == -1) {
+            vm_domainset_iter_ignore(&di, domain);
+        }
     } while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
     return (m);
@@ -3024,7 +3034,7 @@
  * "npages" must be greater than zero.  Both "alignment" and "boundary"
  * must be a power of two.
  */
-bool
+int
 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     int desired_runs)
@@ -3032,14 +3042,15 @@
     struct vm_domain *vmd;
     vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs;
     u_long count, minalign, reclaimed;
-    int error, i, min_reclaim, nruns, options, req_class, segind;
-    bool ret;
+    int error, i, min_reclaim, nruns, options, req_class;
+    int segind, start_segind;
+    int ret;
 
     KASSERT(npages > 0, ("npages is 0"));
     KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
     KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 
-    ret = false;
+    ret = ENOMEM;
 
     /*
      * If the caller wants to reclaim multiple runs, try to allocate
@@ -3079,6 +3090,13 @@
     if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
         req_class = VM_ALLOC_SYSTEM;
 
+    start_segind = vm_phys_lookup_segind(low);
+    if (vm_phys_find_range(bounds, start_segind, domain, npages,
+        low, high) == -1) {
+        ret = ERANGE;
+        goto done;
+    }
+
     /*
      * Return if the number of free pages cannot satisfy the requested
      * allocation.
@@ -3100,7 +3118,7 @@
      * and restrictions, and record them in "m_runs".
      */
     count = 0;
-    segind = vm_phys_lookup_segind(low);
+    segind = start_segind;
     while ((segind = vm_phys_find_range(bounds, segind, domain, npages,
         low, high)) != -1) {
         while ((m_run = vm_page_scan_contig(npages, bounds[0],
@@ -3128,7 +3146,7 @@
             if (error == 0) {
                 reclaimed += npages;
                 if (reclaimed >= min_reclaim) {
-                    ret = true;
+                    ret = 0;
                     goto done;
                 }
             }
@@ -3143,7 +3161,8 @@
         else if (options == VPSC_NOSUPER)
             options = VPSC_ANY;
         else if (options == VPSC_ANY) {
-            ret = reclaimed != 0;
+            if (reclaimed != 0)
+                ret = 0;
             goto done;
         }
     }
@@ -3153,7 +3172,7 @@
     return (ret);
 }
 
-bool
+int
 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
@@ -3161,20 +3180,25 @@
         alignment, boundary, 1));
 }
 
-bool
+int
 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary)
 {
     struct vm_domainset_iter di;
-    int domain;
-    bool ret;
+    int domain, ret, status;
+
+    ret = ERANGE;
 
     vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
     do {
-        ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
+        status = vm_page_reclaim_contig_domain(domain, req, npages, low,
             high, alignment, boundary);
-        if (ret)
-            break;
+        if (status == 0)
+            return (0);
+        if (status != ENOMEM)
+            vm_domainset_iter_ignore(&di, domain);
+        else
+            ret = ENOMEM;
     } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
     return (ret);
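
Note for reviewers: the callers converted above follow the errno convention that vm_page_reclaim_contig() now exposes. A return of 0 means at least one run was reclaimed and the allocation is worth retrying immediately; ENOMEM means nothing could be reclaimed yet, so a caller that may sleep typically calls vm_wait() before retrying; any other value (ERANGE from the new range check) means no physical range can ever satisfy the request, so retrying cannot help. The sketch below illustrates that contract; it is hypothetical example code, not part of the patch, and example_alloc_contig() is an invented name.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 * Hypothetical caller (illustration only): allocate a wired, physically
 * contiguous run of "npages" pages anywhere in physical memory, using the
 * errno-based reclaim contract introduced by this change.
 */
static vm_page_t
example_alloc_contig(u_long npages, u_long alignment, vm_memattr_t memattr)
{
    vm_page_t m;
    int err, tries;

    for (tries = 0; tries < 3; tries++) {
        m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED, npages, 0,
            ~(vm_paddr_t)0, alignment, 0, memattr);
        if (m != NULL)
            return (m);
        err = vm_page_reclaim_contig(0, npages, 0, ~(vm_paddr_t)0,
            alignment, 0);
        if (err == ENOMEM)
            vm_wait(NULL);      /* Nothing reclaimable; wait for free pages. */
        else if (err != 0)
            return (NULL);      /* ERANGE: no satisfying range exists. */
    }
    return (NULL);
}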