diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -208,6 +208,22 @@
 static struct timeout_task uma_timeout_task;
 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
 
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC) && VM_NRESERVLEVEL > 0
+
+/*
+ * This structure is used to separate NOFREE slabs on systems
+ * using superpage reservations, with the goal of reducing long-term
+ * memory fragmentation caused by NOFREE slabs.
+ */
+static struct uma_nofreeq {
+	struct mtx lock;
+	vm_page_t ma;
+	int offs;
+} nofreeqs[MAXMEMDOM];
+#define	UMA_NOFREEQ_LOCK(nqp)	mtx_lock(&(nqp)->lock)
+#define	UMA_NOFREEQ_UNLOCK(nqp)	mtx_unlock(&(nqp)->lock)
+#endif
+
 /*
  * This structure is passed as the zone ctor arg so that I don't have to create
  * a special allocation function just for zones.
@@ -2081,6 +2097,60 @@
 }
 
 #if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
+#if VM_NRESERVLEVEL > 0
+
+/*
+ * Allocate a NOFREE slab.
+ *
+ * This routine hands out NOFREE slabs from superpage-sized
+ * physical memory blocks in order to reduce memory fragmentation.
+ * When a NOFREE superpage is used up the routine will try to fetch
+ * a new one from the freelists and discard the old one.
+ */
+static vm_page_t
+uma_alloc_nofree(int domain, int req)
+{
+	vm_page_t m;
+	struct vm_domain *vmd;
+	struct uma_nofreeq *nqp;
+
+	nqp = &nofreeqs[domain];
+	UMA_NOFREEQ_LOCK(nqp);
+	if (nqp->offs >= (1 << VM_LEVEL_0_ORDER) || nqp->ma == NULL) {
+		vmd = VM_DOMAIN(domain);
+		if (!vm_domain_allocate(vmd, req, 1)) {
+			UMA_NOFREEQ_UNLOCK(nqp);
+			return (NULL);
+		}
+		vm_domain_free_lock(vmd);
+		nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
+		    VM_LEVEL_0_ORDER);
+		vm_domain_free_unlock(vmd);
+		if (nqp->ma == NULL) {
+			UMA_NOFREEQ_UNLOCK(nqp);
+			vm_domain_freecnt_inc(vmd, 1);
+			return (NULL);
+		}
+		nqp->offs = 0;
+	}
+	m = &nqp->ma[nqp->offs++];
+	UMA_NOFREEQ_UNLOCK(nqp);
+
+	vm_page_dequeue(m);
+	m->pindex = 0xdeadc0dedeadc0de;
+	m->flags = m->flags & PG_ZERO;
+	m->a.flags = 0;
+	m->oflags = VPO_UNMANAGED;
+	m->busy_lock = VPB_UNBUSIED;
+	vm_wire_add(1);
+	m->ref_count = 1;
+	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
+		pmap_zero_page(m);
+
+	return (m);
+}
+#endif
+
 void *
 uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
     int wait)
@@ -2088,12 +2158,23 @@
 	vm_page_t m;
 	vm_paddr_t pa;
 	void *va;
+	int req;
 
+	req = malloc2vm_flags(wait);
 	*flags = UMA_SLAB_PRIV;
-	m = vm_page_alloc_noobj_domain(domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED);
+#if VM_NRESERVLEVEL > 0
+	if ((wait & M_NEVERFREED) != 0) {
+		m = uma_alloc_nofree(domain, req);
+		if (m != NULL)
+			goto found;
+	}
+#endif
+	m = vm_page_alloc_noobj_domain(domain, req | VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
+#if VM_NRESERVLEVEL > 0
+found:
+#endif
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
@@ -3217,6 +3298,12 @@
 	vm_radix_reserve_kva();
 #endif
 
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC) && VM_NRESERVLEVEL > 0
+	/* Initialize locks for the NOFREE page queues. */
+	for (int i = 0; i < vm_ndomains; i++)
+		mtx_init(&nofreeqs[i].lock, "umanq", NULL, MTX_DEF);
+#endif
+
 	booted = BOOT_KVA;
 	zone_foreach_unlocked(zone_kva_available, NULL);
 	bucket_enable();
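
For context, a minimal usage sketch, not part of the patch: a hypothetical kernel component creating a zone whose slabs are never freed. The zone name, item size, and the example_* identifiers are made up for illustration; the wiring that causes a never-freed slab allocation to carry the M_NEVERFREED wait flag checked in uma_small_alloc() above lives in UMA's keg/slab code and is not shown in this diff.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <vm/uma.h>

static uma_zone_t example_zone;		/* hypothetical zone */

static void
example_init(void)
{
	/*
	 * UMA_ZONE_NOFREE marks the zone's slabs as never returned to the
	 * page allocator.  With this patch applied, such slab pages on
	 * UMA_USE_DMAP systems can be carved out of dedicated
	 * superpage-sized chunks via uma_alloc_nofree() instead of
	 * puncturing ordinary superpage reservations.
	 */
	example_zone = uma_zcreate("example_nofree", 128,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static void *
example_alloc(void)
{
	return (uma_zalloc(example_zone, M_WAITOK | M_ZERO));
}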