Index: amd64/amd64/pmap.c
===================================================================
--- amd64/amd64/pmap.c
+++ amd64/amd64/pmap.c
@@ -2414,9 +2414,8 @@
 	/*
 	 * allocate the page directory page
 	 */
-	while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		VM_WAIT;
+	pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
 
 	pml4phys = VM_PAGE_TO_PHYS(pml4pg);
 	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);
Index: amd64/amd64/uma_machdep.c
===================================================================
--- amd64/amd64/uma_machdep.c
+++ amd64/amd64/uma_machdep.c
@@ -51,16 +51,9 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc(NULL, 0, pflags);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			else
-				VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc(NULL, 0, pflags);
+	if (m == NULL)
+		return (NULL);
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
Index: arm64/arm64/uma_machdep.c
===================================================================
--- arm64/arm64/uma_machdep.c
+++ arm64/arm64/uma_machdep.c
@@ -51,15 +51,8 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc(NULL, 0, pflags);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			else
-				VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc(NULL, 0, pflags);
+	if (m == NULL)
+		return (NULL);
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
Index: fs/tmpfs/tmpfs_subr.c
===================================================================
--- fs/tmpfs/tmpfs_subr.c
+++ fs/tmpfs/tmpfs_subr.c
@@ -1407,13 +1407,10 @@
 				goto retry;
 			MPASS(m->valid == VM_PAGE_BITS_ALL);
 		} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
-			m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
-			if (m == NULL) {
-				VM_OBJECT_WUNLOCK(uobj);
-				VM_WAIT;
-				VM_OBJECT_WLOCK(uobj);
+			m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
+			    VM_ALLOC_WAITFAIL);
+			if (m == NULL)
 				goto retry;
-			}
 			rv = vm_pager_get_pages(uobj, &m, 1, NULL, NULL);
 			vm_page_lock(m);
Index: kern/uipc_shm.c
===================================================================
--- kern/uipc_shm.c
+++ kern/uipc_shm.c
@@ -454,13 +454,10 @@
 			if (vm_page_sleep_if_busy(m, "shmtrc"))
 				goto retry;
 		} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-			m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
-			if (m == NULL) {
-				VM_OBJECT_WUNLOCK(object);
-				VM_WAIT;
-				VM_OBJECT_WLOCK(object);
+			m = vm_page_alloc(object, idx,
+			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+			if (m == NULL)
 				goto retry;
-			}
 			rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 			vm_page_lock(m);
Index: kern/vfs_bio.c
===================================================================
--- kern/vfs_bio.c
+++ kern/vfs_bio.c
@@ -4515,7 +4515,6 @@
 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
-tryagain:
 		/*
 		 * note: must allocate system pages since blocking here
 		 * could interfere with paging I/O, no matter which
@@ -4522,11 +4521,8 @@
 		 * process we are.
 		 */
 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
-		if (p == NULL) {
-			VM_WAIT;
-			goto tryagain;
-		}
+		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
+		    VM_ALLOC_WAITOK);
 		pmap_qenter(pg, &p, 1);
 		bp->b_pages[index] = p;
 	}
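Every hunk above is the same conversion: an open-coded VM_WAIT retry loop around vm_page_alloc() collapses into a single call that carries its wait policy as a flag. A minimal before/after sketch of the pattern (hypothetical caller; this is the VM_ALLOC_NOOBJ case, where VM_ALLOC_WAITOK makes the allocator sleep and retry internally):

	/* Before: the caller spins, sleeping in VM_WAIT after each failure. */
	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED)) == NULL)
		VM_WAIT;

	/* After: the allocator sleeps and retries; m is never NULL here. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);

For the M_NOWAIT-aware UMA back-ends, malloc2vm_flags() (see the vm_page.h hunk below) now translates the caller's M_WAITOK/M_NOWAIT directly, so the explicit loop disappears entirely.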
Index: mips/mips/uma_machdep.c
===================================================================
--- mips/mips/uma_machdep.c
+++ mips/mips/uma_machdep.c
@@ -51,6 +51,10 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
+#ifndef __mips_n64
+	pflags &= ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+	pflags |= VM_ALLOC_NOWAIT;
+#endif
 
 	for (;;) {
 		m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
Index: powerpc/aim/mmu_oea64.c
===================================================================
--- powerpc/aim/mmu_oea64.c
+++ powerpc/aim/mmu_oea64.c
@@ -1519,15 +1519,9 @@
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
+	if (m == NULL)
+		return (NULL);
 
 	va = VM_PAGE_TO_PHYS(m);
Index: powerpc/aim/slb.c
===================================================================
--- powerpc/aim/slb.c
+++ powerpc/aim/slb.c
@@ -490,18 +490,11 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,
-		    PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,
+	    PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
+	if (m == NULL)
+		return (NULL);
 
 	va = (void *) VM_PAGE_TO_PHYS(m);
 
 	if (!hw_direct_map)
Index: powerpc/powerpc/uma_machdep.c
===================================================================
--- powerpc/powerpc/uma_machdep.c
+++ powerpc/powerpc/uma_machdep.c
@@ -61,14 +61,8 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
+	if (m == NULL)
+		return (NULL);
 
 	pa = VM_PAGE_TO_PHYS(m);
Index: sparc64/sparc64/vm_machdep.c
===================================================================
--- sparc64/sparc64/vm_machdep.c
+++ sparc64/sparc64/vm_machdep.c
@@ -402,15 +402,8 @@
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-	for (;;) {
-		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			else
-				VM_WAIT;
-		} else
-			break;
-	}
+	m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
+	if (m == NULL)
+		return (NULL);
 
 	pa = VM_PAGE_TO_PHYS(m);
Index: sys/smp.h
===================================================================
--- sys/smp.h
+++ sys/smp.h
@@ -36,7 +36,7 @@
 	/* NUMA node. */
 	TOPO_TYPE_NODE,
 	/* Other logical or physical grouping of PUs. */
-	/* E.g. PUs on the same dye, or PUs sharing an FPU. */
+	/* E.g. PUs on the same die, or PUs sharing an FPU. */
 	TOPO_TYPE_GROUP,
 	/* The whole system. */
 	TOPO_TYPE_SYSTEM
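Only mips/mips/uma_machdep.c keeps its retry loop: on 32-bit MIPS the slab page must come from the direct-mapped freelist, which presumably can be exhausted even while free pages remain elsewhere, so sleeping inside the allocator is not guaranteed to make progress. The hunk therefore strips any wait flags that malloc2vm_flags() set and forces VM_ALLOC_NOWAIT. The same masking idiom applies to any path that must fail fast (sketch, pflags as in the hunk above):

	/* Never sleep inside the allocator; handle NULL in our own loop. */
	pflags &= ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;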
Index: vm/phys_pager.c
===================================================================
--- vm/phys_pager.c
+++ vm/phys_pager.c
@@ -209,13 +209,10 @@
 		if (m == NULL) {
 			ahead = MIN(end - i, PHYSALLOC);
 			m = vm_page_alloc(object, i, VM_ALLOC_NORMAL |
-			    VM_ALLOC_ZERO | VM_ALLOC_COUNT(ahead));
-			if (m == NULL) {
-				VM_OBJECT_WUNLOCK(object);
-				VM_WAIT;
-				VM_OBJECT_WLOCK(object);
+			    VM_ALLOC_ZERO | VM_ALLOC_WAITFAIL |
+			    VM_ALLOC_COUNT(ahead));
+			if (m == NULL)
 				goto retry;
-			}
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			m->valid = VM_PAGE_BITS_ALL;
Index: vm/swap_pager.c
===================================================================
--- vm/swap_pager.c
+++ vm/swap_pager.c
@@ -1820,7 +1820,7 @@
 				vm_pageout_oom(VM_OOM_SWAPZ);
 			pause("swzonxb", 10);
 		} else
-			VM_WAIT;
+			uma_zwait(swblk_zone);
 		VM_OBJECT_WLOCK(object);
 		sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 		    rdpi);
@@ -1850,7 +1850,7 @@
 				vm_pageout_oom(VM_OOM_SWAPZ);
 			pause("swzonxp", 10);
 		} else
-			VM_WAIT;
+			uma_zwait(swpctrie_zone);
 		VM_OBJECT_WLOCK(object);
 		sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 		    rdpi);
Index: vm/uma.h
===================================================================
--- vm/uma.h
+++ vm/uma.h
@@ -366,6 +366,11 @@
 }
 
 /*
+ * Wait until the specified zone can allocate an item.
+ */
+void uma_zwait(uma_zone_t zone);
+
+/*
 * XXX The rest of the prototypes in this header are h0h0 magic for the VM.
 * If you think you need to use it for a normal zone you're probably incorrect.
 */
Index: vm/uma_core.c
===================================================================
--- vm/uma_core.c
+++ vm/uma_core.c
@@ -1109,7 +1109,9 @@
 	npages = howmany(bytes, PAGE_SIZE);
 	while (npages > 0) {
 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
-		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
+		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
+		    VM_ALLOC_NOWAIT));
 		if (p != NULL) {
 			/*
 			 * Since the page does not belong to an object, its
@@ -1119,11 +1120,6 @@
 			npages--;
 			continue;
 		}
-		if (wait & M_WAITOK) {
-			VM_WAIT;
-			continue;
-		}
-
 		/*
 		 * Page allocation failed, free intermediate pages and
 		 * exit.
@@ -2026,6 +2022,15 @@
 	sx_sunlock(&uma_drain_lock);
 }
 
+void
+uma_zwait(uma_zone_t zone)
+{
+	void *item;
+
+	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
+	uma_zfree(zone, item);
+}
+
 /* See uma.h */
 void *
 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
Index: vm/vm_fault.c
===================================================================
--- vm/vm_fault.c
+++ vm/vm_fault.c
@@ -1669,6 +1669,10 @@
 		dst_m = vm_page_alloc(dst_object, (src_object ==
 		    dst_object ? src_pindex : 0) + dst_pindex,
 		    VM_ALLOC_NORMAL);
+		/*
+		 * XXX can we busy/hold src_m to permit dropping the
+		 * object lock before allocation?
+		 */
 		if (dst_m == NULL) {
 			VM_OBJECT_WUNLOCK(dst_object);
 			VM_OBJECT_RUNLOCK(object);
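uma_zwait(), added above, converts "sleep until pages are free" into "sleep until this zone can allocate an item": it simply allocates one item with M_WAITOK and frees it back. That is why swap_pager.c now waits on swblk_zone and swpctrie_zone rather than in VM_WAIT; the sleep is tied to the resource that was actually exhausted. A sketch of the resulting caller pattern, loosely modeled on the swap_pager hunks (names and retry label follow that code, not a fixed API):

	sb = uma_zalloc(swblk_zone, M_NOWAIT);
	if (sb == NULL) {
		VM_OBJECT_WUNLOCK(object);
		uma_zwait(swblk_zone);	/* M_WAITOK alloc + free of one item */
		VM_OBJECT_WLOCK(object);
		goto retry;	/* the object may have changed while unlocked */
	}

vm_radix_wait(), added in vm/vm_radix.c below, is the same idea specialized to the radix-node zone that backs page insertion.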
Index: vm/vm_kern.c
===================================================================
--- vm/vm_kern.c
+++ vm/vm_kern.c
@@ -172,6 +172,8 @@
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+	pflags |= VM_ALLOC_NOWAIT;
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		tries = 0;
@@ -227,6 +229,8 @@
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+	pflags |= VM_ALLOC_NOWAIT;
 	npages = atop(size);
 	VM_OBJECT_WLOCK(object);
 	tries = 0;
@@ -338,10 +342,13 @@
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+	if (flags & M_WAITOK)
+		pflags |= VM_ALLOC_WAITFAIL;
 
 	i = 0;
+	VM_OBJECT_WLOCK(object);
retry:
-	VM_OBJECT_WLOCK(object);
 	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
 	for (; i < size; i += PAGE_SIZE, mpred = m) {
 		m = vm_page_alloc_after(object, atop(offset + i), pflags,
@@ -353,11 +360,9 @@
 		 * aren't on any queues.
 		 */
 		if (m == NULL) {
+			if ((flags & M_NOWAIT) == 0)
+				goto retry;
 			VM_OBJECT_WUNLOCK(object);
-			if ((flags & M_NOWAIT) == 0) {
-				VM_WAIT;
-				goto retry;
-			}
 			kmem_unback(object, addr, i);
 			return (KERN_NO_SPACE);
 		}
Index: vm/vm_object.c
===================================================================
--- vm/vm_object.c
+++ vm/vm_object.c
@@ -1471,7 +1471,7 @@
 		if (vm_page_rename(m, new_object, idx)) {
 			VM_OBJECT_WUNLOCK(new_object);
 			VM_OBJECT_WUNLOCK(orig_object);
-			VM_WAIT;
+			vm_radix_wait();
 			VM_OBJECT_WLOCK(orig_object);
 			VM_OBJECT_WLOCK(new_object);
 			goto retry;
@@ -1533,8 +1533,9 @@
 			vm_page_lock(p);
 		VM_OBJECT_WUNLOCK(object);
 		VM_OBJECT_WUNLOCK(backing_object);
+		/* The page is only NULL when rename fails. */
 		if (p == NULL)
-			VM_WAIT;
+			vm_radix_wait();
 		else
 			vm_page_busy_sleep(p, "vmocol", false);
 		VM_OBJECT_WLOCK(object);
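VM_ALLOC_WAITFAIL is the replacement for the unlock/VM_WAIT/relock dance in object-backed callers (tmpfs, uipc_shm, phys_pager, and kmem_back above): on shortage the allocator itself drops the object lock, sleeps once, reacquires the lock, and returns NULL. The caller's only obligation is to restart from the lookup, since the object may have changed while unlocked. The canonical caller shape, as a sketch (object and idx are hypothetical):

	VM_OBJECT_WLOCK(object);
retry:
	m = vm_page_lookup(object, idx);
	if (m == NULL) {
		m = vm_page_alloc(object, idx,
		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
		if (m == NULL)
			goto retry;	/* we slept; revalidate everything */
	}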
Index: vm/vm_page.h
===================================================================
--- vm/vm_page.h
+++ vm/vm_page.h
@@ -415,6 +415,8 @@
 #define	VM_ALLOC_INTERRUPT	1
 #define	VM_ALLOC_SYSTEM		2
 #define	VM_ALLOC_CLASS_MASK	3
+#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
+#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
 #define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
 #define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
@@ -422,7 +424,7 @@
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
-#define	VM_ALLOC_NOWAIT		0x8000	/* (gp) Do not sleep */
+#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
 #define	VM_ALLOC_COUNT_SHIFT	16
 #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
 
@@ -441,6 +443,10 @@
 		pflags |= VM_ALLOC_ZERO;
 	if ((malloc_flags & M_NODUMP) != 0)
 		pflags |= VM_ALLOC_NODUMP;
+	if ((malloc_flags & M_NOWAIT) != 0)
+		pflags |= VM_ALLOC_NOWAIT;
+	if ((malloc_flags & M_WAITOK) != 0)
+		pflags |= VM_ALLOC_WAITOK;
 	return (pflags);
 }
 #endif
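The parenthesized letters in the flag comments record which allocators honor each flag; per the legend in vm_page.h, (a) is vm_page_alloc(), (c) vm_page_alloc_contig(), (f) vm_page_alloc_freelist(), (g) vm_page_grab(), and (p) vm_page_grab_pages(). VM_ALLOC_WAITOK is deliberately (acf) only, and the new KASSERT in vm_page.c below additionally forbids combining it with an object: a sleep inside the allocator drops the object lock, after which the pindex may have been filled, so the insertion cannot simply be retried. A sketch of the legal and illegal combinations (obj and idx hypothetical):

	/* Legal: no object, so the allocator may sleep and retry itself. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WAITOK);

	/*
	 * Illegal: fires the new KASSERT.  Object-backed callers use
	 * VM_ALLOC_WAITFAIL (or VM_ALLOC_NOWAIT) and retry themselves.
	 */
	m = vm_page_alloc(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_WAITOK);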
Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -172,6 +172,7 @@
     vm_page_t mpred);
 static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high);
+static int vm_page_alloc_fail(vm_object_t object, int req);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
@@ -1595,6 +1596,8 @@
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("inconsistent object(%p)/req(%x)", object, req));
+	KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+	    ("Can't sleep and retry object insertion."));
 	KASSERT(mpred == NULL || mpred->pindex < pindex,
 	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
 	    (uintmax_t)pindex));
@@ -1613,6 +1616,7 @@
 	 * Allocate a page if the number of free pages exceeds the minimum
 	 * for the request class.
 	 */
+again:
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
@@ -1645,10 +1649,8 @@
 		/*
 		 * Not allocatable, give up.
 		 */
-		mtx_unlock(&vm_page_queue_free_mtx);
-		atomic_add_int(&vm_pageout_deficit,
-		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
-		pagedaemon_wakeup();
+		if (vm_page_alloc_fail(object, req))
+			goto again;
 		return (NULL);
 	}
@@ -1700,6 +1702,11 @@
 		m->busy_lock = VPB_UNBUSIED;
 		/* Don't change PG_ZERO. */
 		vm_page_free_toq(m);
+		if (req & VM_ALLOC_WAITFAIL) {
+			VM_OBJECT_WUNLOCK(object);
+			vm_radix_wait();
+			VM_OBJECT_WLOCK(object);
+		}
 		return (NULL);
 	}
@@ -1777,6 +1784,8 @@
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)",
 	    object, req));
+	KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+	    ("Can't sleep and retry object insertion."));
 	if (object != NULL) {
 		VM_OBJECT_ASSERT_WLOCKED(object);
 		KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
@@ -1802,6 +1811,7 @@
 	 * Can we allocate the pages without the number of free pages falling
 	 * below the lower bound for the allocation class?
 	 */
+again:
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
@@ -1823,9 +1833,8 @@
 		m_ret = vm_phys_alloc_contig(npages, low, high,
 		    alignment, boundary);
 	} else {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		atomic_add_int(&vm_pageout_deficit, npages);
-		pagedaemon_wakeup();
+		if (vm_page_alloc_fail(object, req))
+			goto again;
 		return (NULL);
 	}
 	if (m_ret != NULL)
@@ -1891,6 +1900,11 @@
 			/* Don't change PG_ZERO. */
 			vm_page_free_toq(m);
 		}
+		if (req & VM_ALLOC_WAITFAIL) {
+			VM_OBJECT_WUNLOCK(object);
+			vm_radix_wait();
+			VM_OBJECT_WLOCK(object);
+		}
 		return (NULL);
 	}
 	mpred = m;
@@ -1963,18 +1977,17 @@
 	/*
 	 * Do not allocate reserved pages unless the req has asked for it.
 	 */
+again:
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
 	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    vm_cnt.v_free_count > 0))
+	    vm_cnt.v_free_count > 0)) {
 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
-	else {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		atomic_add_int(&vm_pageout_deficit,
-		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
-		pagedaemon_wakeup();
+	} else {
+		if (vm_page_alloc_fail(NULL, req))
+			goto again;
 		return (NULL);
 	}
 	if (m == NULL) {
@@ -2533,11 +2546,11 @@
 *	Sleep until free pages are available for allocation.
 *	- Called in various places before memory allocations.
 */
-void
-vm_wait(void)
+static void
+_vm_wait(void)
 {
 
-	mtx_lock(&vm_page_queue_free_mtx);
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	if (curproc == pageproc) {
 		vm_pageout_pages_needed = 1;
 		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
@@ -2555,7 +2568,46 @@
 	}
 }
 
+void
+vm_wait(void)
+{
+
+	mtx_lock(&vm_page_queue_free_mtx);
+	_vm_wait();
+}
+
 /*
+ *	vm_page_alloc_fail:
+ *
+ *	Called when a page allocation function fails.  Informs the
+ *	pagedaemon and performs the requested wait.  Requires the
+ *	free queue lock and the object lock on entry.  Returns with
+ *	the object lock held and the free queue lock released.
+ *	Returns an error when retry is necessary.
+ */
+static int
+vm_page_alloc_fail(vm_object_t object, int req)
+{
+
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+	atomic_add_int(&vm_pageout_deficit,
+	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
+	pagedaemon_wakeup();
+	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
+		if (object != NULL)
+			VM_OBJECT_WUNLOCK(object);
+		_vm_wait();
+		if (object != NULL)
+			VM_OBJECT_WLOCK(object);
+		if (req & VM_ALLOC_WAITOK)
+			return (EAGAIN);
+	} else
+		mtx_unlock(&vm_page_queue_free_mtx);
+	return (0);
+}
+
+/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Sleep until free pages are available for allocation.
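vm_page_alloc_fail() centralizes what each failure path used to open-code: record the page deficit, wake the page daemon, and sleep if a wait flag asks for it. Its return value encodes the policy: nonzero (EAGAIN) only for VM_ALLOC_WAITOK, telling the allocator to loop back to the again: label; for VM_ALLOC_WAITFAIL it sleeps but returns 0, so NULL still propagates to the caller. Every call site therefore reduces to the same two lines:

		if (vm_page_alloc_fail(object, req))
			goto again;	/* VM_ALLOC_WAITOK: retry internally */
		return (NULL);	/* NOWAIT, or WAITFAIL after one sleep */

The remaining vm_page.c hunks apply the same policy to the grab functions: caller wait flags are masked out of allocflags and VM_ALLOC_WAITFAIL substituted unless VM_ALLOC_NOWAIT was given, so the existing retrylookup loop doubles as the retry-after-sleep path.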
@@ -3179,11 +3231,16 @@
 {
 	vm_page_t m;
 	int sleep;
+	int pflags;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
+	pflags = allocflags &
+	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+		pflags |= VM_ALLOC_WAITFAIL;
retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3217,13 +3273,10 @@
 			return (m);
 		}
 	}
-	m = vm_page_alloc(object, pindex, allocflags);
+	m = vm_page_alloc(object, pindex, pflags);
 	if (m == NULL) {
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (NULL);
-		VM_OBJECT_WUNLOCK(object);
-		VM_WAIT;
-		VM_OBJECT_WLOCK(object);
 		goto retrylookup;
 	}
 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
@@ -3262,6 +3315,7 @@
     vm_page_t *ma, int count)
 {
 	vm_page_t m, mpred;
+	int pflags;
 	int i;
 	bool sleep;
 
@@ -3276,6 +3330,10 @@
 	    ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
 	if (count == 0)
 		return (0);
+	pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
+	    VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
+	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+		pflags |= VM_ALLOC_WAITFAIL;
 	i = 0;
retrylookup:
 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
@@ -3316,14 +3374,10 @@
 			vm_page_sbusy(m);
 		} else {
 			m = vm_page_alloc_after(object, pindex + i,
-			    (allocflags & ~VM_ALLOC_IGN_SBUSY) |
-			    VM_ALLOC_COUNT(count - i), mpred);
+			    pflags | VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 					break;
-				VM_OBJECT_WUNLOCK(object);
-				VM_WAIT;
-				VM_OBJECT_WLOCK(object);
 				goto retrylookup;
 			}
 		}
Index: vm/vm_radix.h
===================================================================
--- vm/vm_radix.h
+++ vm/vm_radix.h
@@ -36,6 +36,7 @@
 #ifdef _KERNEL
 
 int		vm_radix_insert(struct vm_radix *rtree, vm_page_t page);
+void		vm_radix_wait(void);
 boolean_t	vm_radix_is_singleton(struct vm_radix *rtree);
 vm_page_t	vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index);
 vm_page_t	vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index);
Index: vm/vm_radix.c
===================================================================
--- vm/vm_radix.c
+++ vm/vm_radix.c
@@ -775,6 +775,12 @@
 	panic("%s: original replacing page not found", __func__);
 }
 
+void
+vm_radix_wait(void)
+{
+	uma_zwait(vm_radix_node_zone);
+}
+
 #ifdef DDB
 /*
 * Show details about the given radix node.
Index: x86/iommu/intel_utils.c
===================================================================
--- x86/iommu/intel_utils.c
+++ x86/iommu/intel_utils.c
@@ -270,7 +270,9 @@
 			break;
 		}
 		m = vm_page_alloc_contig(obj, idx, VM_ALLOC_NOBUSY |
-		    VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP | zeroed, 1, 0,
+		    VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP | zeroed |
+		    ((flags & DMAR_PGF_WAITOK) != 0 ? VM_ALLOC_WAITFAIL :
+		    VM_ALLOC_NOWAIT), 1, 0,
 		    dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 		if ((flags & DMAR_PGF_OBJL) == 0)
 			VM_OBJECT_WUNLOCK(obj);
@@ -282,11 +284,6 @@
 		}
 		if ((flags & DMAR_PGF_WAITOK) == 0)
 			break;
-		if ((flags & DMAR_PGF_OBJL) != 0)
-			VM_OBJECT_WUNLOCK(obj);
-		VM_WAIT;
-		if ((flags & DMAR_PGF_OBJL) != 0)
-			VM_OBJECT_WLOCK(obj);
 	}
 	return (m);
 }
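Taken together, the patch leaves page allocation with three explicit wait policies in place of the callers' VM_WAIT loops. A summary of the contract as implemented above (note that vm_page_alloc_freelist() can still return NULL under VM_ALLOC_WAITOK when its freelist is empty, which is why the mips back-end masks the flag):

	/*
	 * VM_ALLOC_NOWAIT   - never sleep; return NULL on shortage.
	 * VM_ALLOC_WAITFAIL - sleep once, dropping and reacquiring any
	 *                     object lock, then return NULL; the caller
	 *                     revalidates and retries.
	 * VM_ALLOC_WAITOK   - sleep and retry inside the allocator;
	 *                     VM_ALLOC_NOOBJ callers only, so
	 *                     vm_page_alloc() itself never returns NULL.
	 */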