diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -162,6 +162,22 @@
 
 static uma_zone_t fakepg_zone;
 
+#if VM_NRESERVLEVEL > 0
+
+/*
+ * This structure is used to separate NOFREE pages on systems
+ * using superpage reservations, with the goal of reducing long-term
+ * memory fragmentation caused by NOFREE pages.
+ */
+static struct vm_nofreeq {
+	struct mtx lock;
+	vm_page_t ma;
+	int offs;
+} __aligned(CACHE_LINE_SIZE) nofreeqs[MAXMEMDOM];
+#define	VM_NOFREEQ_LOCK(nqp)	mtx_lock(&(nqp)->lock)
+#define	VM_NOFREEQ_UNLOCK(nqp)	mtx_unlock(&(nqp)->lock)
+#endif
+
 static void vm_page_alloc_check(vm_page_t m);
 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
@@ -184,6 +200,7 @@
 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
 static void vm_page_zone_release(void *arg, void **store, int cnt);
+static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
@@ -865,6 +882,10 @@
 	 * Initialize the reservation management system.
 	 */
 	vm_reserv_init();
+
+	/* Initialize locks for the NOFREE page queues. */
+	for (int i = 0; i < vm_ndomains; i++)
+		mtx_init(&nofreeqs[i].lock, "vmnfq", NULL, MTX_DEF);
 #endif
 
 	return (vaddr);
@@ -2100,6 +2121,12 @@
 		return (NULL);
 again:
 #if VM_NRESERVLEVEL > 0
+	if ((req & VM_ALLOC_NOFREE) != 0) {
+		m = vm_page_alloc_nofree_domain(domain, req);
+		if (m != NULL)
+			goto found;
+	}
+
 	/*
 	 * Can we allocate the page from a reservation?
 	 */
@@ -2431,6 +2458,13 @@
 	    ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0);
 	vmd = VM_DOMAIN(domain);
 again:
+#if VM_NRESERVLEVEL > 0
+	if ((req & VM_ALLOC_NOFREE) != 0) {
+		m = vm_page_alloc_nofree_domain(domain, req);
+		if (m != NULL)
+			goto found;
+	}
+#endif
 	if (freelist == VM_NFREELIST &&
 	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
@@ -2513,6 +2547,52 @@
 	return (_vm_page_alloc_noobj_domain(domain, freelist, req));
 }
 
+/*
+ * Allocate a single NOFREE page.
+ *
+ * This routine hands out NOFREE pages from superpage-sized
+ * physical memory blocks in order to reduce memory fragmentation.
+ * When a NOFREE superpage is used up the routine will try to fetch
+ * a new one from the freelists and discard the old one.
+ */
+static vm_page_t
+vm_page_alloc_nofree_domain(int domain, int req)
+{
+#if VM_NRESERVLEVEL > 0
+	vm_page_t m;
+	struct vm_domain *vmd;
+	struct vm_nofreeq *nqp;
+
+	KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req));
+
+	nqp = &nofreeqs[domain];
+	VM_NOFREEQ_LOCK(nqp);
+	if (nqp->offs >= (1 << VM_LEVEL_0_ORDER) || nqp->ma == NULL) {
+		vmd = VM_DOMAIN(domain);
+		if (!vm_domain_allocate(vmd, req, 1 << VM_LEVEL_0_ORDER)) {
+			VM_NOFREEQ_UNLOCK(nqp);
+			return (NULL);
+		}
+		vm_domain_free_lock(vmd);
+		nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
+		    VM_LEVEL_0_ORDER);
+		vm_domain_free_unlock(vmd);
+		if (nqp->ma == NULL) {
+			VM_NOFREEQ_UNLOCK(nqp);
+			vm_domain_freecnt_inc(vmd, 1 << VM_LEVEL_0_ORDER);
+			return (NULL);
+		}
+		nqp->offs = 0;
+	}
+	m = &nqp->ma[nqp->offs++];
+	VM_NOFREEQ_UNLOCK(nqp);
+
+	return (m);
+#else
+	return (NULL);
+#endif
+}
+
 vm_page_t
 vm_page_alloc_noobj(int req)
 {