Index: sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c =================================================================== --- sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c +++ sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c @@ -238,14 +238,14 @@ kmem_cache_reap_soon(kmem_cache_t *cache) { #ifndef KMEM_DEBUG - uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN); + uma_zone_reclaim(cache->kc_zone, UMA_ANYDOMAIN, UMA_RECLAIM_DRAIN); #endif } void kmem_reap(void) { - uma_reclaim(UMA_RECLAIM_TRIM); + uma_reclaim(UMA_ANYDOMAIN, UMA_RECLAIM_TRIM); } #else void Index: sys/kern/kern_mbuf.c =================================================================== --- sys/kern/kern_mbuf.c +++ sys/kern/kern_mbuf.c @@ -716,7 +716,7 @@ * mbuf free. */ if (uma_zone_exhausted(zone_clust)) - uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN); + uma_zone_reclaim(zone_pack, UMA_ANYDOMAIN, UMA_RECLAIM_DRAIN); } /* @@ -1358,7 +1358,7 @@ * we might be able to loosen a few clusters up on the drain. */ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) { - uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN); + uma_zone_reclaim(zone_pack, UMA_ANYDOMAIN, UMA_RECLAIM_DRAIN); uma_zalloc_arg(zone_clust, m, how); } MBUF_PROBE2(m__clget, m, how); Index: sys/kern/subr_vmem.c =================================================================== --- sys/kern/subr_vmem.c +++ sys/kern/subr_vmem.c @@ -586,7 +586,8 @@ qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift; for (i = 0; i < qcache_idx_max; i++) - uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN); + uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_ANYDOMAIN, + UMA_RECLAIM_DRAIN); } #ifndef UMA_MD_SMALL_ALLOC Index: sys/kern/vfs_subr.c =================================================================== --- sys/kern/vfs_subr.c +++ sys/kern/vfs_subr.c @@ -1449,7 +1449,7 @@ done = vlrureclaim(reclaim_nc_src, trigger, target); mtx_unlock(&vnode_list_mtx); if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) - 
uma_reclaim(UMA_RECLAIM_DRAIN); + uma_reclaim(UMA_ANYDOMAIN, UMA_RECLAIM_DRAIN); if (done == 0) { if (force == 0 || force == 1) { force = 2; Index: sys/vm/uma.h =================================================================== --- sys/vm/uma.h +++ sys/vm/uma.h @@ -460,8 +460,8 @@ #define UMA_RECLAIM_DRAIN 1 /* release bucket cache */ #define UMA_RECLAIM_DRAIN_CPU 2 /* release bucket and per-CPU caches */ #define UMA_RECLAIM_TRIM 3 /* trim bucket cache to WSS */ -void uma_reclaim(int req); -void uma_zone_reclaim(uma_zone_t, int req); +void uma_reclaim(int domain, int req); +void uma_zone_reclaim(uma_zone_t, int domain, int req); /* * Sets the alignment mask to be used for all zones requesting cache Index: sys/vm/uma_core.c =================================================================== --- sys/vm/uma_core.c +++ sys/vm/uma_core.c @@ -275,7 +275,7 @@ static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int); static void cache_drain(uma_zone_t); static void bucket_drain(uma_zone_t, uma_bucket_t); -static void bucket_cache_reclaim(uma_zone_t zone, bool); +static void bucket_cache_reclaim(uma_zone_t zone, int domain, bool); static int keg_ctor(void *, int, void *, int); static void keg_dtor(void *, int, void *); static int zone_ctor(void *, int, void *, int); @@ -300,7 +300,7 @@ static void bucket_init(void); static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); -static void bucket_zone_drain(void); +static void bucket_zone_drain(int domain); static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item); @@ -521,12 +521,12 @@ } static void -bucket_zone_drain(void) +bucket_zone_drain(int domain) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) - uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN); + 
uma_zone_reclaim(ubz->ubz_zone, domain, UMA_RECLAIM_DRAIN); } /* @@ -1037,7 +1037,6 @@ bucket_free(zone, bucket, NULL); } } - bucket_cache_reclaim(zone, true); } static void @@ -1144,7 +1143,7 @@ * estimated working set size. */ static void -bucket_cache_reclaim(uma_zone_t zone, bool drain) +bucket_cache_reclaim(uma_zone_t zone, int domain, bool drain) { uma_zone_domain_t zdom; uma_bucket_t bucket; @@ -1152,6 +1151,9 @@ int i; for (i = 0; i < vm_ndomains; i++) { + if (domain != UMA_ANYDOMAIN && i != domain) + continue; + /* * The cross bucket is partially filled and not part of * the item count. Reclaim it individually here. @@ -1251,7 +1253,7 @@ * Returns nothing. */ static void -keg_drain(uma_keg_t keg) +keg_drain(uma_keg_t keg, int domain) { struct slabhead freeslabs; uma_domain_t dom; @@ -1262,6 +1264,9 @@ return; for (i = 0; i < vm_ndomains; i++) { + if (domain != UMA_ANYDOMAIN && i != domain) + continue; + dom = &keg->uk_domain[i]; CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u", keg->uk_name, keg, i, dom->ud_free_items); @@ -1288,7 +1293,7 @@ } static void -zone_reclaim(uma_zone_t zone, int waitok, bool drain) +zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain) { /* @@ -1305,7 +1310,7 @@ } zone->uz_flags |= UMA_ZFLAG_RECLAIMING; ZONE_UNLOCK(zone); - bucket_cache_reclaim(zone, drain); + bucket_cache_reclaim(zone, domain, drain); /* * The DRAINING flag protects us from being freed while * must be able to release and acquire the right lock for each keg.
*/ if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) - keg_drain(zone->uz_keg); + keg_drain(zone->uz_keg, domain); ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING; wakeup(zone); @@ -1322,17 +1327,21 @@ } static void -zone_drain(uma_zone_t zone, void *unused) +zone_drain(uma_zone_t zone, void *arg) { + int domain; - zone_reclaim(zone, M_NOWAIT, true); + domain = *(int *)arg; + zone_reclaim(zone, domain, M_NOWAIT, true); } static void -zone_trim(uma_zone_t zone, void *unused) +zone_trim(uma_zone_t zone, void *arg) { + int domain; - zone_reclaim(zone, M_NOWAIT, false); + domain = *(int *)arg; + zone_reclaim(zone, domain, M_NOWAIT, false); } /* @@ -2532,7 +2541,7 @@ * released and then refilled before we * remove it... we dont care for now */ - zone_reclaim(zone, M_WAITOK, true); + zone_reclaim(zone, UMA_ANYDOMAIN, M_WAITOK, true); /* * We only destroy kegs from non secondary/non cache zones. */ @@ -4570,7 +4579,7 @@ /* See uma.h */ void -uma_reclaim(int req) +uma_reclaim(int domain, int req) { CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); @@ -4579,14 +4588,14 @@ switch (req) { case UMA_RECLAIM_TRIM: - zone_foreach(zone_trim, NULL); + zone_foreach(zone_trim, &domain); break; case UMA_RECLAIM_DRAIN: case UMA_RECLAIM_DRAIN_CPU: - zone_foreach(zone_drain, NULL); + zone_foreach(zone_drain, &domain); if (req == UMA_RECLAIM_DRAIN_CPU) { pcpu_cache_drain_safe(NULL); - zone_foreach(zone_drain, NULL); + zone_foreach(zone_drain, &domain); } break; default: @@ -4598,9 +4607,9 @@ * we visit again so that we can free pages that are empty once other * zones are drained. We have to do the same for buckets. 
*/ - zone_drain(slabzones[0], NULL); - zone_drain(slabzones[1], NULL); - bucket_zone_drain(); + zone_drain(slabzones[0], &domain); + zone_drain(slabzones[1], &domain); + bucket_zone_drain(domain); sx_xunlock(&uma_reclaim_lock); } @@ -4625,7 +4634,7 @@ hz); sx_xunlock(&uma_reclaim_lock); EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); - uma_reclaim(UMA_RECLAIM_DRAIN_CPU); + uma_reclaim(UMA_ANYDOMAIN, UMA_RECLAIM_DRAIN_CPU); atomic_store_int(&uma_reclaim_needed, 0); /* Don't fire more than once per-second. */ pause("umarclslp", hz); @@ -4634,19 +4643,19 @@ /* See uma.h */ void -uma_zone_reclaim(uma_zone_t zone, int req) +uma_zone_reclaim(uma_zone_t zone, int domain, int req) { switch (req) { case UMA_RECLAIM_TRIM: - zone_trim(zone, NULL); + zone_trim(zone, &domain); break; case UMA_RECLAIM_DRAIN: - zone_drain(zone, NULL); + zone_drain(zone, &domain); break; case UMA_RECLAIM_DRAIN_CPU: pcpu_cache_drain_safe(zone); - zone_drain(zone, NULL); + zone_drain(zone, &domain); break; default: panic("unhandled reclamation request %d", req); Index: sys/vm/vm_pageout.c =================================================================== --- sys/vm/vm_pageout.c +++ sys/vm/vm_pageout.c @@ -1972,7 +1972,7 @@ * We do this explicitly after the caches have been * drained above. */ - uma_reclaim(UMA_RECLAIM_TRIM); + uma_reclaim(UMA_ANYDOMAIN, UMA_RECLAIM_TRIM); ret = true; break; }