Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -1253,39 +1253,37 @@
 static void
 keg_drain(uma_keg_t keg)
 {
-    struct slabhead freeslabs = { 0 };
+    struct slabhead freeslabs;
     uma_domain_t dom;
-    uma_slab_t slab, tmp;
+    uma_slab_t slab;
     int i, n;
 
-    /*
-     * We don't want to take pages from statically allocated kegs at this
-     * time
-     */
     if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
         return;
 
     for (i = 0; i < vm_ndomains; i++) {
         CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
-            keg->uk_name, keg, i, dom->ud_free);
-        n = 0;
+            keg->uk_name, keg, i, dom->ud_free_items);
         dom = &keg->uk_domain[i];
+        LIST_INIT(&freeslabs);
+
         KEG_LOCK(keg, i);
-        LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
-            if (keg->uk_flags & UMA_ZFLAG_HASH)
-                UMA_HASH_REMOVE(&keg->uk_hash, slab);
-            n++;
-            LIST_REMOVE(slab, us_link);
-            LIST_INSERT_HEAD(&freeslabs, slab, us_link);
+        if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
+            LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
+                UMA_HASH_REMOVE(&keg->uk_hash, slab);
         }
+        n = dom->ud_free_slabs;
+        LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);
+        dom->ud_free_slabs = 0;
+        dom->ud_free_items -= n * keg->uk_ipers;
         dom->ud_pages -= n * keg->uk_ppera;
-        dom->ud_free -= n * keg->uk_ipers;
         KEG_UNLOCK(keg, i);
-    }
 
-    while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
-        LIST_REMOVE(slab, us_link);
-        keg_free_slab(keg, slab, keg->uk_ipers);
+        while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
+            LIST_REMOVE(slab, us_link);
+            keg_free_slab(keg, slab, keg->uk_ipers);
+        }
     }
 }
@@ -1453,7 +1451,7 @@
     dom = &keg->uk_domain[domain];
     LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
     dom->ud_pages += keg->uk_ppera;
-    dom->ud_free += keg->uk_ipers;
+    dom->ud_free_items += keg->uk_ipers;
 
     return (slab);
@@ -2205,7 +2203,7 @@
                 "pages", CTLFLAG_RD, &dom->ud_pages, 0,
                 "Total pages currently allocated from VM");
             SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
-                "free", CTLFLAG_RD, &dom->ud_free, 0,
+                "free_items", CTLFLAG_RD, &dom->ud_free_items, 0,
                 "items free in the slab layer");
         }
     } else
@@ -2493,7 +2491,7 @@
     keg = (uma_keg_t)arg;
     free = pages = 0;
     for (i = 0; i < vm_ndomains; i++) {
-        free += keg->uk_domain[i].ud_free;
+        free += keg->uk_domain[i].ud_free_items;
         pages += keg->uk_domain[i].ud_pages;
         KEG_LOCK_FINI(keg, i);
     }
@@ -3307,11 +3305,11 @@
     start = domain;
     do {
         dom = &keg->uk_domain[domain];
-        if (!LIST_EMPTY(&dom->ud_part_slab))
-            return (LIST_FIRST(&dom->ud_part_slab));
-        if (!LIST_EMPTY(&dom->ud_free_slab)) {
-            slab = LIST_FIRST(&dom->ud_free_slab);
+        if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL)
+            return (slab);
+        if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) {
             LIST_REMOVE(slab, us_link);
+            dom->ud_free_slabs--;
             LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
             return (slab);
         }
@@ -3338,7 +3336,7 @@
     KEG_LOCK(keg, domain);
     reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
-    if (keg->uk_domain[domain].ud_free <= reserve ||
+    if (keg->uk_domain[domain].ud_free_items <= reserve ||
         (slab = keg_first_slab(keg, domain, rr)) == NULL) {
         KEG_UNLOCK(keg, domain);
         return (NULL);
@@ -3423,7 +3421,7 @@
     BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
     item = slab_item(slab, keg, freei);
     slab->us_freecount--;
-    dom->ud_free--;
+    dom->ud_free_items--;
 
     /* Move this slab to the full list */
     if (slab->us_freecount == 0) {
@@ -3459,7 +3457,7 @@
         dom = &keg->uk_domain[slab->us_domain];
         while (slab->us_freecount && i < max) {
             bucket[i++] = slab_alloc_item(keg, slab);
-            if (dom->ud_free <= keg->uk_reserve)
+            if (dom->ud_free_items <= keg->uk_reserve)
                 break;
 #ifdef NUMA
             /*
@@ -4161,9 +4159,10 @@
 
     /* Do we need to remove from any lists? */
     dom = &keg->uk_domain[slab->us_domain];
-    if (slab->us_freecount+1 == keg->uk_ipers) {
+    if (slab->us_freecount + 1 == keg->uk_ipers) {
         LIST_REMOVE(slab, us_link);
         LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
+        dom->ud_free_slabs++;
     } else if (slab->us_freecount == 0) {
         LIST_REMOVE(slab, us_link);
         LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
@@ -4175,7 +4174,7 @@
     slab->us_freecount++;
 
     /* Keg statistics. */
-    dom->ud_free++;
+    dom->ud_free_items++;
 }
 
 static void
@@ -4559,6 +4558,7 @@
             LIST_REMOVE(slab, us_link);
             LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
+            dom->ud_free_slabs++;
             KEG_UNLOCK(keg, slab->us_domain);
             break;
         }
@@ -4836,7 +4836,7 @@
     LIST_FOREACH(kz, &uma_kegs, uk_link) {
         kfree = pages = 0;
         for (i = 0; i < vm_ndomains; i++) {
-            kfree += kz->uk_domain[i].ud_free;
+            kfree += kz->uk_domain[i].ud_free_items;
             pages += kz->uk_domain[i].ud_pages;
         }
         LIST_FOREACH(z, &kz->uk_zones, uz_link) {
@@ -5140,7 +5140,7 @@
         *cachefree += z->uz_domain[i].uzd_nitems;
         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
             (LIST_FIRST(&kz->uk_zones) != z)))
-            *cachefree += kz->uk_domain[i].ud_free;
+            *cachefree += kz->uk_domain[i].ud_free_items;
     }
     *used = *allocs - frees;
     return (((int64_t)*used + *cachefree) * kz->uk_size);
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -325,7 +325,8 @@
     struct slabhead ud_free_slab;   /* completely unallocated slabs */
     struct slabhead ud_full_slab;   /* fully allocated slabs */
     uint32_t    ud_pages;       /* Total page count */
-    uint32_t    ud_free;        /* Count of items free in slabs */
+    uint32_t    ud_free_items;  /* Count of items free in all slabs */
+    uint32_t    ud_free_slabs;  /* Count of free slabs */
 } __aligned(CACHE_LINE_SIZE);
 
 typedef struct uma_domain * uma_domain_t;
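
Reviewer note, not part of the patch: a minimal sketch of the invariants the split counters are meant to preserve, using only the fields and macros that appear in the hunks above and a hypothetical keg_verify_domain() helper that does not exist in uma_core.c. After the change, ud_free_items should equal the sum of us_freecount over the partial and free slab lists of a domain, and ud_free_slabs should equal the length of ud_free_slab; both are maintained under the per-domain keg lock.

/*
 * Hypothetical consistency check (illustration only, not part of this
 * patch).  Call with the per-domain keg lock held.
 */
static void
keg_verify_domain(uma_keg_t keg, int domain)
{
    uma_domain_t dom;
    uma_slab_t slab;
    uint32_t items, slabs;

    dom = &keg->uk_domain[domain];
    items = slabs = 0;

    /* Partially allocated slabs contribute only their free items. */
    LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
        items += slab->us_freecount;

    /* Fully free slabs contribute uk_ipers items each and are counted. */
    LIST_FOREACH(slab, &dom->ud_free_slab, us_link) {
        items += slab->us_freecount;
        slabs++;
    }

    KASSERT(items == dom->ud_free_items,
        ("%s: keg %s domain %d: ud_free_items %u, walked %u",
        __func__, keg->uk_name, domain, dom->ud_free_items, items));
    KASSERT(slabs == dom->ud_free_slabs,
        ("%s: keg %s domain %d: ud_free_slabs %u, walked %u",
        __func__, keg->uk_name, domain, dom->ud_free_slabs, slabs));
}

The second invariant is what keg_drain() now relies on: it reads n = dom->ud_free_slabs and subtracts n * keg->uk_ipers from ud_free_items without walking the list it swaps onto freeslabs.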