Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -185,6 +185,10 @@
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
 }
 
+#define	VM_PGCACHE_DOMAIN_SHIFT	4
+#define	VM_PGCACHE_POOL_MASK	((1 << VM_PGCACHE_DOMAIN_SHIFT) - 1)
+CTASSERT(1 << VM_PGCACHE_DOMAIN_SHIFT > VM_NFREEPOOL);
+
 /*
  * The cache page zone is initialized later since we need to be able to allocate
  * pages before UMA is fully initialized.
@@ -193,21 +197,26 @@
 vm_page_init_cache_zones(void *dummy __unused)
 {
 	struct vm_domain *vmd;
-	int i;
+	int arg, domain, pool;
+
+	for (domain = 0; domain < vm_ndomains; domain++) {
+		vmd = VM_DOMAIN(domain);
 
-	for (i = 0; i < vm_ndomains; i++) {
-		vmd = VM_DOMAIN(i);
 		/*
-		 * Don't allow the page cache to take up more than .25% of
+		 * Don't allow the page cache to take up more than .50% of
 		 * memory.
 		 */
 		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
 			continue;
-		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
-		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-		    vm_page_zone_import, vm_page_zone_release, vmd,
-		    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
-		(void )uma_zone_set_maxcache(vmd->vmd_pgcache, 0);
+		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
+			arg = (domain << VM_PGCACHE_DOMAIN_SHIFT) | pool;
+			vmd->vmd_pgcache[pool] = uma_zcache_create("vm pgcache",
+			    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
+			    vm_page_zone_import, vm_page_zone_release,
+			    (void *)(uintptr_t)arg,
+			    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
+			(void)uma_zone_set_maxcache(vmd->vmd_pgcache[pool], 0);
+		}
 	}
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
@@ -1788,7 +1797,7 @@
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
-	int flags;
+	int flags, pool;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1805,6 +1814,7 @@
 
 	flags = 0;
 	m = NULL;
+	pool = (object != NULL) ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
 again:
 #if VM_NRESERVLEVEL > 0
 	/*
@@ -1819,8 +1829,8 @@
 	}
 #endif
 	vmd = VM_DOMAIN(domain);
-	if (object != NULL && vmd->vmd_pgcache != NULL) {
-		m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
+	if (vmd->vmd_pgcache[pool] != NULL) {
+		m = uma_zalloc(vmd->vmd_pgcache[pool], M_NOWAIT);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
@@ -1831,8 +1841,7 @@
 	 * If not, allocate it from the free page queues.
 	 */
 	vm_domain_free_lock(vmd);
-	m = vm_phys_alloc_pages(domain, object != NULL ?
-	    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
+	m = vm_phys_alloc_pages(domain, pool, 0);
 	vm_domain_free_unlock(vmd);
 	if (m == NULL) {
 		vm_domain_freecnt_inc(vmd, 1);
@@ -2223,16 +2232,16 @@
 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
-	int i;
+	int i, pool;
 
-	vmd = arg;
+	pool = (int)(uintptr_t)arg & VM_PGCACHE_POOL_MASK;
+	vmd = VM_DOMAIN((int)(uintptr_t)arg >> VM_PGCACHE_DOMAIN_SHIFT);
 	/* Only import if we can bring in a full bucket. */
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
 	vm_domain_free_lock(vmd);
-	i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
-	    (vm_page_t *)store);
+	i = vm_phys_alloc_npages(domain, pool, cnt, (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);
@@ -2247,7 +2256,7 @@
 	vm_page_t m;
 	int i;
 
-	vmd = arg;
+	vmd = VM_DOMAIN((int)(uintptr_t)arg >> VM_PGCACHE_DOMAIN_SHIFT);
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
@@ -3480,13 +3489,15 @@
 vm_page_free_toq(vm_page_t m)
 {
 	struct vm_domain *vmd;
+	int pool;
 
 	if (!vm_page_free_prep(m))
 		return;
 
+	pool = m->pool;
 	vmd = vm_pagequeue_domain(m);
-	if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache != NULL) {
-		uma_zfree(vmd->vmd_pgcache, m);
+	if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache[pool] != NULL) {
+		uma_zfree(vmd->vmd_pgcache[pool], m);
 		return;
 	}
 	vm_domain_free_lock(vmd);
Index: sys/vm/vm_pagequeue.h
===================================================================
--- sys/vm/vm_pagequeue.h
+++ sys/vm/vm_pagequeue.h
@@ -103,7 +103,7 @@
 	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
 	struct mtx_padalign vmd_free_mtx;
 	struct mtx_padalign vmd_pageout_mtx;
-	uma_zone_t vmd_pgcache;		/* (c) page free cache. */
+	uma_zone_t vmd_pgcache[VM_NFREEPOOL];	/* (c) page free cache. */
 	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
 	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
 	u_int vmd_domain;		/* (c) Domain number. */