sys/vm/uma_core.c
[... 512 lines elided ...]
#endif
return (bucket); | return (bucket); | ||||
} | } | ||||
static void | static void | ||||
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) | bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) | ||||
{ | { | ||||
struct uma_bucket_zone *ubz; | struct uma_bucket_zone *ubz; | ||||
if (bucket->ub_cnt != 0) | |||||
bucket_drain(zone, bucket); | |||||
KASSERT(bucket->ub_cnt == 0, | KASSERT(bucket->ub_cnt == 0, | ||||
("bucket_free: Freeing a non free bucket.")); | ("bucket_free: Freeing a non free bucket.")); | ||||
KASSERT(bucket->ub_seq == SMR_SEQ_INVALID, | KASSERT(bucket->ub_seq == SMR_SEQ_INVALID, | ||||
("bucket_free: Freeing an SMR bucket.")); | ("bucket_free: Freeing an SMR bucket.")); | ||||
if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) | if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) | ||||
udata = (void *)(uintptr_t)zone->uz_flags; | udata = (void *)(uintptr_t)zone->uz_flags; | ||||
ubz = bucket_zone_lookup(bucket->ub_entries); | ubz = bucket_zone_lookup(bucket->ub_entries); | ||||
uma_zfree_arg(ubz->ubz_zone, bucket, udata); | uma_zfree_arg(ubz->ubz_zone, bucket, udata); | ||||
} | } | ||||
static void | static void | ||||
bucket_zone_drain(void) | bucket_zone_drain(void) | ||||
{ | { | ||||
struct uma_bucket_zone *ubz; | struct uma_bucket_zone *ubz; | ||||
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) | for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) | ||||
uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN); | uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN); | ||||
} | } | ||||
/* | /* | ||||
* Acquire the domain lock and record contention. | |||||
*/ | |||||
static uma_zone_domain_t | |||||
zone_domain_lock(uma_zone_t zone, int domain) | |||||
{ | |||||
uma_zone_domain_t zdom; | |||||
bool lockfail; | |||||
zdom = ZDOM_GET(zone, domain); | |||||
lockfail = false; | |||||
if (ZDOM_OWNED(zdom)) | |||||
lockfail = true; | |||||
markj: These three lines could be written as `lockfail = ZDOM_OWNED(zdom)`.
ZDOM_LOCK(zdom); | |||||
/* This is unsynchronized. The counter does not need to be precise. */ | |||||
if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max) | |||||
zone->uz_bucket_size++; | |||||
markj: The lack of precision seems ok, but this race makes it possible to exceed bucket_size_max. In practice it'll probably only go over by 1, but it could be more if you're unlucky. We should have a comment explaining why this is acceptable (or consider sharding this field as well, or ...).
jeff (author): I'd rather not shard it because it makes for some weird perf scenarios, especially since round-robin may bounce between them. I can also load it once and store a calculated value so it never exceeds max.
return (zdom); | |||||
} | |||||
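Folding in both suggestions above gives a variant like the following (a sketch only, not this revision's code; it combines markj's one-line test with jeff's load-once clamp):

static uma_zone_domain_t
zone_domain_lock(uma_zone_t zone, int domain)
{
	uma_zone_domain_t zdom;
	bool lockfail;
	int size;

	zdom = ZDOM_GET(zone, domain);
	lockfail = ZDOM_OWNED(zdom);
	ZDOM_LOCK(zdom);
	/*
	 * Load uz_bucket_size once and store a computed value so that
	 * racing unlocked updates may lose increments but can never
	 * push the size past uz_bucket_size_max.
	 */
	size = zone->uz_bucket_size;
	if (lockfail && size < zone->uz_bucket_size_max)
		zone->uz_bucket_size = size + 1;
	return (zdom);
}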
/* | |||||
* Search for the domain with the least cached items and return it, breaking
* ties in favor of the preferred domain.
*/
jeff: Needs a comment. (Done)
static int | |||||
zone_domain_lowest(uma_zone_t zone, int pref) | |||||
{ | |||||
long least, nitems; | |||||
int domain; | |||||
int i; | |||||
least = LONG_MAX; | |||||
domain = 0; | |||||
for (i = 0; i < vm_ndomains; i++) { | |||||
nitems = ZDOM_GET(zone, i)->uzd_nitems; | |||||
if (nitems < least) { | |||||
domain = i; | |||||
least = nitems; | |||||
} else if (nitems == least && (i == pref || domain == pref)) | |||||
domain = pref; | |||||
} | |||||
return (domain); | |||||
} | |||||
/* | |||||
* Search for the domain with the most cached items and return it, or the
* preferred domain if it already has more than a full bucket cached.
*/ | |||||
static int | |||||
zone_domain_highest(uma_zone_t zone, int pref) | |||||
{ | |||||
long most, nitems; | |||||
int domain; | |||||
int i; | |||||
if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX) | |||||
return (pref); | |||||
most = 0; | |||||
domain = 0; | |||||
for (i = 0; i < vm_ndomains; i++) { | |||||
nitems = ZDOM_GET(zone, i)->uzd_nitems; | |||||
if (nitems > most) { | |||||
domain = i; | |||||
most = nitems; | |||||
} | |||||
} | |||||
return (domain); | |||||
} | |||||
/* | |||||
* Safely subtract cnt from imax. | |||||
*/ | |||||
static void | |||||
zone_domain_imax_sub(uma_zone_domain_t zdom, int cnt) | |||||
{ | |||||
long new; | |||||
long old; | |||||
old = zdom->uzd_imax; | |||||
do { | |||||
if (old <= cnt) | |||||
new = 0; | |||||
else | |||||
new = old - cnt; | |||||
} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, new) == 0);
jeff (author): this is a bug. cnt should be new. I made an error refactoring into the function. (Done)
} | |||||
/* | |||||
* Set the maximum imax value. | |||||
*/ | |||||
static void | |||||
zone_domain_imax_set(uma_zone_domain_t zdom, int nitems) | |||||
{ | |||||
long old; | |||||
old = zdom->uzd_imax; | |||||
do { | |||||
if (old >= nitems) | |||||
break; | |||||
} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0); | |||||
} | |||||
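Both helpers above rely on the atomic_fcmpset_long() retry idiom: on failure the primitive stores the currently observed value back into old and returns 0, so each loop iteration recomputes its target from fresh state. A generic sketch of the pattern, where compute() stands in for any hypothetical update function:

	long new, old;

	old = counter;			/* unlocked snapshot */
	do {
		new = compute(old);	/* derive the update from the snapshot */
	} while (atomic_fcmpset_long(&counter, &old, new) == 0);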
/* | |||||
* Attempt to satisfy an allocation by retrieving a full bucket from one of the | * Attempt to satisfy an allocation by retrieving a full bucket from one of the | ||||
* zone's caches. If a bucket is found the zone is not locked on return. | * zone's caches. If a bucket is found the zone is not locked on return. | ||||
*/ | */ | ||||
static uma_bucket_t | static uma_bucket_t | ||||
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom) | zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim) | ||||
{ | { | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
int i; | int i; | ||||
bool dtor = false; | bool dtor = false; | ||||
ZONE_LOCK_ASSERT(zone); | ZDOM_LOCK_ASSERT(zdom); | ||||
if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL) | if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL) | ||||
return (NULL); | return (NULL); | ||||
/* SMR Buckets can not be re-used until readers expire. */ | /* SMR Buckets can not be re-used until readers expire. */ | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && | if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && | ||||
bucket->ub_seq != SMR_SEQ_INVALID) { | bucket->ub_seq != SMR_SEQ_INVALID) { | ||||
if (!smr_poll(zone->uz_smr, bucket->ub_seq, false)) | if (!smr_poll(zone->uz_smr, bucket->ub_seq, false)) | ||||
return (NULL); | return (NULL); | ||||
bucket->ub_seq = SMR_SEQ_INVALID; | bucket->ub_seq = SMR_SEQ_INVALID; | ||||
dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR; | dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR; | ||||
if (STAILQ_NEXT(bucket, ub_link) != NULL) | |||||
zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq; | |||||
} | } | ||||
MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | ||||
STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link); | STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link); | ||||
zdom->uzd_nitems -= bucket->ub_cnt; | zdom->uzd_nitems -= bucket->ub_cnt; | ||||
if (zdom->uzd_imin > zdom->uzd_nitems) | |||||
/* | |||||
* Shift the bounds of the current WSS interval to avoid | |||||
* perturbing the estimate. | |||||
*/ | |||||
if (reclaim) { | |||||
zone_domain_imax_sub(zdom, bucket->ub_cnt); | |||||
zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt); | |||||
} else if (zdom->uzd_imin > zdom->uzd_nitems) | |||||
zdom->uzd_imin = zdom->uzd_nitems; | zdom->uzd_imin = zdom->uzd_nitems; | ||||
zone->uz_bkt_count -= bucket->ub_cnt; | |||||
ZONE_UNLOCK(zone); | ZDOM_UNLOCK(zdom); | ||||
if (dtor) | if (dtor) | ||||
for (i = 0; i < bucket->ub_cnt; i++) | for (i = 0; i < bucket->ub_cnt; i++) | ||||
jeff: Should be an inline. (Done)
item_dtor(zone, bucket->ub_bucket[i], zone->uz_size, | item_dtor(zone, bucket->ub_bucket[i], zone->uz_size, | ||||
NULL, SKIP_NONE); | NULL, SKIP_NONE); | ||||
return (bucket); | return (bucket); | ||||
} | } | ||||
/* | /* | ||||
* Insert a full bucket into the specified cache. The "ws" parameter indicates | * Insert a full bucket into the specified cache. The "ws" parameter indicates | ||||
* whether the bucket's contents should be counted as part of the zone's working | * whether the bucket's contents should be counted as part of the zone's working | ||||
* set. | * set. The bucket may be freed if it exceeds the bucket limit. | ||||
*/ | */ | ||||
static void | static void | ||||
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket, | zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata, | ||||
const bool ws) | const bool ws) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | |||||
ZONE_LOCK_ASSERT(zone); | /* We don't cache empty buckets. This can happen after a reclaim. */ | ||||
KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max, | if (bucket->ub_cnt == 0) | ||||
goto out; | |||||
zdom = zone_domain_lock(zone, domain); | |||||
KASSERT(!ws || zdom->uzd_nitems < zone->uz_bkt_max, | |||||
("%s: zone %p overflow", __func__, zone)); | ("%s: zone %p overflow", __func__, zone)); | ||||
markj: I don't think we need this assertion anymore now that this function handles cache overflow.
jeff (author): I guess it is possible for the overflow to come via cross domain flush where you don't know the value of ws.
STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link); | /* | ||||
* If this bucket counts toward the working set, update the interval maximum.
*/ | |||||
zdom->uzd_nitems += bucket->ub_cnt; | zdom->uzd_nitems += bucket->ub_cnt; | ||||
if (ws && zdom->uzd_imax < zdom->uzd_nitems) | if (ws) | ||||
zdom->uzd_imax = zdom->uzd_nitems; | zone_domain_imax_set(zdom, zdom->uzd_nitems); | ||||
jeff: Should be an inline. (Done)
zone->uz_bkt_count += bucket->ub_cnt; | if (__predict_true(zdom->uzd_nitems < zone->uz_bkt_max)) { | ||||
if (STAILQ_EMPTY(&zdom->uzd_buckets)) | |||||
zdom->uzd_seq = bucket->ub_seq; | |||||
STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link); | |||||
ZDOM_UNLOCK(zdom); | |||||
return; | |||||
} | } | ||||
zdom->uzd_nitems -= bucket->ub_cnt; | |||||
ZDOM_UNLOCK(zdom); | |||||
out: | |||||
bucket_free(zone, bucket, udata); | |||||
} | |||||
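For reference, both kinds of callers are visible later in this change: the forced per-CPU drain passes ws = false so flushed buckets do not inflate the working-set estimate, while the ordinary cross-domain free path passes ws = true:

	zone_put_bucket(zone, domain, b1, NULL, false);		/* cache_drain_safe_cpu */
	zone_put_bucket(zone, domain, b, udata, true);		/* zone_free_cross */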
/* Pops an item out of a per-cpu cache bucket. */ | /* Pops an item out of a per-cpu cache bucket. */ | ||||
static inline void * | static inline void * | ||||
cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket) | cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket) | ||||
{ | { | ||||
void *item; | void *item; | ||||
CRITICAL_ASSERT(curthread); | CRITICAL_ASSERT(curthread); | ||||
[... 124 lines elided ...]
cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
CRITICAL_ASSERT(curthread); | CRITICAL_ASSERT(curthread); | ||||
cache_bucket_copy(&b3, b1); | cache_bucket_copy(&b3, b1); | ||||
cache_bucket_copy(b1, b2); | cache_bucket_copy(b1, b2); | ||||
cache_bucket_copy(b2, &b3); | cache_bucket_copy(b2, &b3); | ||||
} | } | ||||
/* | |||||
* Get the current cpu's domain, corrected for the zone's NUMA placement flags.
*/
jeff: comment. (Done)
static int | |||||
cache_domain(uma_cache_t cache) | |||||
{ | |||||
if ((cache_uz_flags(cache) & | |||||
(UMA_ZONE_FIRSTTOUCH | UMA_ZONE_ROUNDROBIN)) != 0) | |||||
return (PCPU_GET(domain)); | |||||
return (0); | |||||
} | |||||
/* | |||||
* Attempt to fetch a bucket from a zone on behalf of the current cpu cache. | |||||
*/
jeff: This nicely puts the poll outside the lock.
static uma_bucket_t | |||||
cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int *domain) | |||||
{ | |||||
uma_zone_domain_t zdom; | |||||
uma_bucket_t bucket; | |||||
/* | |||||
* Avoid the lock if possible. | |||||
*/ | |||||
if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0) | |||||
*domain = zone_domain_highest(zone, *domain); | |||||
jeff (author): I should just move this up a level and not use the *. (Done)
zdom = ZDOM_GET(zone, *domain); | |||||
if (zdom->uzd_nitems == 0) | |||||
return (NULL); | |||||
if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 && | |||||
!smr_poll(zone->uz_smr, zdom->uzd_seq, false)) | |||||
return (NULL); | |||||
/* | |||||
* Check the zone's cache of buckets. | |||||
*/ | |||||
zdom = zone_domain_lock(zone, *domain); | |||||
if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) { | |||||
KASSERT(bucket->ub_cnt != 0, | |||||
("cache_fetch_bucket: Returning an empty bucket.")); | |||||
return (bucket); | |||||
} | |||||
ZDOM_UNLOCK(zdom); | |||||
return (NULL); | |||||
} | |||||
static void | static void | ||||
zone_log_warning(uma_zone_t zone) | zone_log_warning(uma_zone_t zone) | ||||
{ | { | ||||
static const struct timeval warninterval = { 300, 0 }; | static const struct timeval warninterval = { 300, 0 }; | ||||
if (!zone_warnings || zone->uz_warning == NULL) | if (!zone_warnings || zone->uz_warning == NULL) | ||||
return; | return; | ||||
[... 35 lines elided ...]
* 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the | * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the | ||||
* last 100s. | * last 100s. | ||||
*/ | */ | ||||
static void | static void | ||||
zone_domain_update_wss(uma_zone_domain_t zdom) | zone_domain_update_wss(uma_zone_domain_t zdom) | ||||
{ | { | ||||
long wss; | long wss; | ||||
ZDOM_LOCK(zdom); | |||||
MPASS(zdom->uzd_imax >= zdom->uzd_imin); | MPASS(zdom->uzd_imax >= zdom->uzd_imin); | ||||
wss = zdom->uzd_imax - zdom->uzd_imin; | wss = zdom->uzd_imax - zdom->uzd_imin; | ||||
zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems; | zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems; | ||||
zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5; | zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5; | ||||
ZDOM_UNLOCK(zdom); | |||||
} | } | ||||
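A quick sanity check of the 100 s figure in the comment above (my arithmetic, not from the source): each 20 s interval computes

	uzd_wss = (4 * wss + uzd_wss) / 5

so the sample taken k intervals ago carries weight (4/5) * (1/5)^k. After five intervals (100 s) that weight has decayed to roughly 0.03%, so the estimate is indeed determined almost entirely by the most recent 100 s of activity.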
/* | /* | ||||
* Routine to perform timeout driven calculations. This expands the | * Routine to perform timeout driven calculations. This expands the | ||||
* hashes and does per cpu statistics aggregation. | * hashes and does per cpu statistics aggregation. | ||||
* | * | ||||
* Returns nothing. | * Returns nothing. | ||||
*/ | */ | ||||
[... 46 lines elided ...]
if (ret) {
KEG_UNLOCK(keg, 0); | KEG_UNLOCK(keg, 0); | ||||
hash_free(&oldhash); | hash_free(&oldhash); | ||||
goto update_wss; | goto update_wss; | ||||
} | } | ||||
} | } | ||||
KEG_UNLOCK(keg, 0); | KEG_UNLOCK(keg, 0); | ||||
update_wss: | update_wss: | ||||
ZONE_LOCK(zone); | |||||
for (int i = 0; i < vm_ndomains; i++) | for (int i = 0; i < vm_ndomains; i++) | ||||
zone_domain_update_wss(&zone->uz_domain[i]); | zone_domain_update_wss(ZDOM_GET(zone, i)); | ||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
/* | /* | ||||
* Allocate and zero fill the next sized hash table from the appropriate | * Allocate and zero fill the next sized hash table from the appropriate | ||||
* backing store. | * backing store. | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* hash A new hash structure with the old hash size in uh_hashsize | * hash A new hash structure with the old hash size in uh_hashsize | ||||
[... 102 lines elided ...]
* Nothing | * Nothing | ||||
*/ | */ | ||||
static void | static void | ||||
bucket_drain(uma_zone_t zone, uma_bucket_t bucket) | bucket_drain(uma_zone_t zone, uma_bucket_t bucket) | ||||
{ | { | ||||
int i; | int i; | ||||
if (bucket == NULL || bucket->ub_cnt == 0) | if (bucket->ub_cnt == 0) | ||||
return; | return; | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && | if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && | ||||
bucket->ub_seq != SMR_SEQ_INVALID) { | bucket->ub_seq != SMR_SEQ_INVALID) { | ||||
smr_wait(zone->uz_smr, bucket->ub_seq); | smr_wait(zone->uz_smr, bucket->ub_seq); | ||||
bucket->ub_seq = SMR_SEQ_INVALID; | bucket->ub_seq = SMR_SEQ_INVALID; | ||||
for (i = 0; i < bucket->ub_cnt; i++) | for (i = 0; i < bucket->ub_cnt; i++) | ||||
item_dtor(zone, bucket->ub_bucket[i], | item_dtor(zone, bucket->ub_bucket[i], | ||||
[... 41 lines elided ...]
cache_drain(uma_zone_t zone)
* torn down to prevent improper use of cache_drain(). | * torn down to prevent improper use of cache_drain(). | ||||
*/ | */ | ||||
seq = SMR_SEQ_INVALID; | seq = SMR_SEQ_INVALID; | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) != 0) | if ((zone->uz_flags & UMA_ZONE_SMR) != 0) | ||||
seq = smr_current(zone->uz_smr); | seq = smr_current(zone->uz_smr); | ||||
CPU_FOREACH(cpu) { | CPU_FOREACH(cpu) { | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
bucket = cache_bucket_unload_alloc(cache); | bucket = cache_bucket_unload_alloc(cache); | ||||
if (bucket != NULL) { | if (bucket != NULL) | ||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
} | |||||
bucket = cache_bucket_unload_free(cache); | bucket = cache_bucket_unload_free(cache); | ||||
if (bucket != NULL) { | if (bucket != NULL) { | ||||
bucket->ub_seq = seq; | bucket->ub_seq = seq; | ||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
} | } | ||||
bucket = cache_bucket_unload_cross(cache); | bucket = cache_bucket_unload_cross(cache); | ||||
if (bucket != NULL) { | if (bucket != NULL) { | ||||
bucket->ub_seq = seq; | bucket->ub_seq = seq; | ||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
} | } | ||||
} | } | ||||
bucket_cache_reclaim(zone, true); | bucket_cache_reclaim(zone, true); | ||||
} | } | ||||
static void | static void | ||||
cache_shrink(uma_zone_t zone, void *unused) | cache_shrink(uma_zone_t zone, void *unused) | ||||
{ | { | ||||
if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | ||||
return; | return; | ||||
ZONE_LOCK(zone); | |||||
zone->uz_bucket_size = | zone->uz_bucket_size = | ||||
(zone->uz_bucket_size_min + zone->uz_bucket_size) / 2; | (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2; | ||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
static void | static void | ||||
cache_drain_safe_cpu(uma_zone_t zone, void *unused) | cache_drain_safe_cpu(uma_zone_t zone, void *unused) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_bucket_t b1, b2, b3; | uma_bucket_t b1, b2, b3; | ||||
int domain; | int domain; | ||||
if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | ||||
return; | return; | ||||
b1 = b2 = b3 = NULL; | b1 = b2 = b3 = NULL; | ||||
critical_enter(); | critical_enter(); | ||||
if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) | |||||
domain = PCPU_GET(domain); | |||||
else | |||||
domain = 0; | |||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
domain = cache_domain(cache); | |||||
b1 = cache_bucket_unload_alloc(cache); | b1 = cache_bucket_unload_alloc(cache); | ||||
/* | /* | ||||
* Don't flush SMR zone buckets. This leaves the zone without a | * Don't flush SMR zone buckets. This leaves the zone without a | ||||
* bucket and forces every free to synchronize(). | * bucket and forces every free to synchronize(). | ||||
*/ | */ | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) == 0) { | if ((zone->uz_flags & UMA_ZONE_SMR) == 0) { | ||||
b2 = cache_bucket_unload_free(cache); | b2 = cache_bucket_unload_free(cache); | ||||
b3 = cache_bucket_unload_cross(cache); | b3 = cache_bucket_unload_cross(cache); | ||||
} | } | ||||
critical_exit(); | critical_exit(); | ||||
ZONE_LOCK(zone); | |||||
if (b1 != NULL && b1->ub_cnt != 0) { | |||||
zone_put_bucket(zone, &zone->uz_domain[domain], b1, false); | |||||
b1 = NULL; | |||||
} | |||||
if (b2 != NULL && b2->ub_cnt != 0) { | |||||
zone_put_bucket(zone, &zone->uz_domain[domain], b2, false); | |||||
b2 = NULL; | |||||
} | |||||
ZONE_UNLOCK(zone); | |||||
if (b1 != NULL) | if (b1 != NULL) | ||||
bucket_free(zone, b1, NULL); | zone_put_bucket(zone, domain, b1, NULL, false); | ||||
if (b2 != NULL) | if (b2 != NULL) | ||||
bucket_free(zone, b2, NULL); | zone_put_bucket(zone, domain, b2, NULL, false); | ||||
if (b3 != NULL) { | if (b3 != NULL) | ||||
bucket_drain(zone, b3); | |||||
bucket_free(zone, b3, NULL); | bucket_free(zone, b3, NULL); | ||||
} | } | ||||
} | |||||
/* | /* | ||||
* Safely drain per-CPU caches of a zone(s) to alloc bucket. | * Safely drain per-CPU caches of a zone(s) to alloc bucket. | ||||
* This is an expensive call because it needs to bind to all CPUs | * This is an expensive call because it needs to bind to all CPUs | ||||
* one by one and enter a critical section on each of them in order | * one by one and enter a critical section on each of them in order | ||||
* to safely access their cache buckets. | * to safely access their cache buckets. | ||||
* Zone lock must not be held when calling this function.
*/ | */ | ||||
[... 30 lines elided ...]
* requested a drain, otherwise the per-domain caches are trimmed to their
* estimated working set size.
*/ | */ | ||||
static void | static void | ||||
bucket_cache_reclaim(uma_zone_t zone, bool drain) | bucket_cache_reclaim(uma_zone_t zone, bool drain) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
long target, tofree; | long target; | ||||
int i; | int i; | ||||
/* | |||||
* Shrink the zone bucket size to ensure that the per-CPU caches | |||||
* don't grow too large. | |||||
*/ | |||||
if (zone->uz_bucket_size > zone->uz_bucket_size_min) | |||||
zone->uz_bucket_size--; | |||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
/* | /* | ||||
* The cross bucket is partially filled and not part of | * The cross bucket is partially filled and not part of | ||||
* the item count. Reclaim it individually here. | * the item count. Reclaim it individually here. | ||||
*/ | */ | ||||
zdom = &zone->uz_domain[i]; | zdom = ZDOM_GET(zone, i); | ||||
jeff: I find this slightly questionable on SMR. It may block for a long time if we do preemptive sections. So this should probably poll first. (Done)
if ((zone->uz_flags & UMA_ZONE_SMR) == 0) { | |||||
ZONE_CROSS_LOCK(zone); | ZONE_CROSS_LOCK(zone); | ||||
bucket = zdom->uzd_cross; | bucket = zdom->uzd_cross; | ||||
zdom->uzd_cross = NULL; | zdom->uzd_cross = NULL; | ||||
ZONE_CROSS_UNLOCK(zone); | ZONE_CROSS_UNLOCK(zone); | ||||
if (bucket != NULL) { | if (bucket != NULL) | ||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
} | } | ||||
/* | /* | ||||
* Shrink the zone bucket size to ensure that the per-CPU caches | |||||
* don't grow too large. | |||||
*/ | |||||
ZONE_LOCK(zone); | |||||
if (i == 0 && zone->uz_bucket_size > zone->uz_bucket_size_min) | |||||
zone->uz_bucket_size--; | |||||
/* | |||||
* If we were asked to drain the zone, we are done only once | * If we were asked to drain the zone, we are done only once | ||||
* this bucket cache is empty. Otherwise, we reclaim items in | * this bucket cache is empty. Otherwise, we reclaim items in | ||||
* excess of the zone's estimated working set size. If the | * excess of the zone's estimated working set size. If the | ||||
* difference nitems - imin is larger than the WSS estimate, | * difference nitems - imin is larger than the WSS estimate, | ||||
* then the estimate will grow at the end of this interval and | * then the estimate will grow at the end of this interval and | ||||
* we ignore the historical average. | * we ignore the historical average. | ||||
*/ | */ | ||||
ZDOM_LOCK(zdom); | |||||
target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems - | target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems - | ||||
zdom->uzd_imin); | zdom->uzd_imin); | ||||
while (zdom->uzd_nitems > target) { | while (zdom->uzd_nitems > target) { | ||||
bucket = STAILQ_FIRST(&zdom->uzd_buckets); | bucket = zone_fetch_bucket(zone, zdom, true); | ||||
if (bucket == NULL) | if (bucket == NULL) | ||||
break; | break; | ||||
tofree = bucket->ub_cnt; | |||||
jeff: Mark, if you could review this section in particular. I think what I did is equivalent but maybe not.
STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link); | |||||
zdom->uzd_nitems -= tofree; | |||||
zone->uz_bkt_count -= tofree; | |||||
/* | |||||
* Shift the bounds of the current WSS interval to avoid | |||||
* perturbing the estimate. | |||||
*/ | |||||
zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree); | |||||
zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree); | |||||
ZONE_UNLOCK(zone); | |||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
ZONE_LOCK(zone); | ZDOM_LOCK(zdom); | ||||
} | } | ||||
ZONE_UNLOCK(zone); | ZDOM_UNLOCK(zdom); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) | keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) | ||||
{ | { | ||||
uint8_t *mem; | uint8_t *mem; | ||||
int i; | int i; | ||||
[... 77 lines elided ...]
zone_reclaim(uma_zone_t zone, int waitok, bool drain)
* locks as we go. Only dtor() should do a WAITOK call since it | * locks as we go. Only dtor() should do a WAITOK call since it | ||||
* is the only call that knows the structure will still be available | * is the only call that knows the structure will still be available | ||||
* when it wakes up. | * when it wakes up. | ||||
*/ | */ | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) { | while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) { | ||||
if (waitok == M_NOWAIT) | if (waitok == M_NOWAIT) | ||||
goto out; | goto out; | ||||
msleep(zone, &zone->uz_lock, PVM, "zonedrain", 1); | msleep(zone, &ZDOM_GET(zone, 0)->uzd_lock, PVM, "zonedrain", | ||||
1); | |||||
} | } | ||||
zone->uz_flags |= UMA_ZFLAG_RECLAIMING; | zone->uz_flags |= UMA_ZFLAG_RECLAIMING; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
bucket_cache_reclaim(zone, drain); | bucket_cache_reclaim(zone, drain); | ||||
/* | /* | ||||
* The DRAINING flag protects us from being freed while | * The DRAINING flag protects us from being freed while | ||||
* we're running. Normally the uma_rwlock would protect us but we | * we're running. Normally the uma_rwlock would protect us but we | ||||
[... 761 lines elided ...]
#ifndef SMP | #ifndef SMP | ||||
keg->uk_flags &= ~UMA_ZONE_PCPU; | keg->uk_flags &= ~UMA_ZONE_PCPU; | ||||
#endif | #endif | ||||
keg_layout(keg); | keg_layout(keg); | ||||
/* | /* | ||||
* Use a first-touch NUMA policy for all kegs that pmap_extract() | * Use a first-touch NUMA policy for all kegs that pmap_extract() | ||||
* will work on with the exception of critical VM structures | * will work on with the exception of hash zones which require a | ||||
* necessary for paging. | * single keg lock. | ||||
* | * | ||||
* Use round-robin for everything else except for cache zones. | |||||
* | |||||
* Zones with neither attribute will use a single zone domain. This | |||||
jeff (author): I'm unsure whether I will keep this. ROUNDROBIN domains can now block on allocations when there are free items in the bucket caches of other 'domains'. It's unlikely due to the highest/lowest loops but it is possible. Even with a single domain a thread can block in page alloc while another frees to the domain layer. IIRC earlier versions of the allocator would check for this condition before re-entering vm_page_alloc(). This feature is the only reason for zone_domain().
* is currently unused. | |||||
* | |||||
* Zones may override the default by specifying either. | * Zones may override the default by specifying either. | ||||
*/ | */ | ||||
#ifdef NUMA | #ifdef NUMA | ||||
if ((keg->uk_flags & | if ((keg->uk_flags & (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_HASH)) == 0) | ||||
(UMA_ZFLAG_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0) | |||||
keg->uk_flags |= UMA_ZONE_FIRSTTOUCH; | keg->uk_flags |= UMA_ZONE_FIRSTTOUCH; | ||||
else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0) | else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0) | ||||
keg->uk_flags |= UMA_ZONE_ROUNDROBIN; | keg->uk_flags |= UMA_ZONE_ROUNDROBIN; | ||||
#endif | #endif | ||||
/* | /* | ||||
* If we haven't booted yet we need allocations to go through the | * If we haven't booted yet we need allocations to go through the | ||||
* startup cache until the vm is ready. | * startup cache until the vm is ready. | ||||
[... 85 lines elided ...]
static void | static void | ||||
zone_alloc_counters(uma_zone_t zone, void *unused) | zone_alloc_counters(uma_zone_t zone, void *unused) | ||||
{ | { | ||||
zone->uz_allocs = counter_u64_alloc(M_WAITOK); | zone->uz_allocs = counter_u64_alloc(M_WAITOK); | ||||
zone->uz_frees = counter_u64_alloc(M_WAITOK); | zone->uz_frees = counter_u64_alloc(M_WAITOK); | ||||
zone->uz_fails = counter_u64_alloc(M_WAITOK); | zone->uz_fails = counter_u64_alloc(M_WAITOK); | ||||
zone->uz_xdomain = counter_u64_alloc(M_WAITOK); | |||||
} | } | ||||
static void | static void | ||||
zone_alloc_sysctl(uma_zone_t zone, void *unused) | zone_alloc_sysctl(uma_zone_t zone, void *unused) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
[... 102 lines elided ...]
zone_alloc_sysctl(uma_zone_t zone, void *unused)
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0, | "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0, | ||||
"Number of threads sleeping at limit"); | "Number of threads sleeping at limit"); | ||||
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0, | "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0, | ||||
"Total zone limit sleeps"); | "Total zone limit sleeps"); | ||||
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"bucket_max", CTLFLAG_RD, &zone->uz_bkt_max, 0, | "bucket_max", CTLFLAG_RD, &zone->uz_bkt_max, 0, | ||||
"Maximum number of items in the bucket cache"); | "Maximum number of items in each domain's bucket cache"); | ||||
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | |||||
"bucket_cnt", CTLFLAG_RD, &zone->uz_bkt_count, 0, | |||||
"Number of items in the bucket cache"); | |||||
/* | /* | ||||
* Per-domain zone information. | * Per-domain zone information. | ||||
*/ | */ | ||||
domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), | domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), | ||||
OID_AUTO, "domain", CTLFLAG_RD, NULL, ""); | OID_AUTO, "domain", CTLFLAG_RD, NULL, ""); | ||||
if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0) | |||||
domains = 1; | |||||
for (i = 0; i < domains; i++) { | for (i = 0; i < domains; i++) { | ||||
zdom = &zone->uz_domain[i]; | zdom = ZDOM_GET(zone, i); | ||||
oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), | oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), | ||||
OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, ""); | OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, ""); | ||||
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"nitems", CTLFLAG_RD, &zdom->uzd_nitems, | "nitems", CTLFLAG_RD, &zdom->uzd_nitems, | ||||
"number of items in this domain"); | "number of items in this domain"); | ||||
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"imax", CTLFLAG_RD, &zdom->uzd_imax, | "imax", CTLFLAG_RD, &zdom->uzd_imax, | ||||
"maximum item count in this period"); | "maximum item count in this period"); | ||||
[... 20 lines elided ...]
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
"Total allocation calls"); | "Total allocation calls"); | ||||
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, | "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, | ||||
zone, 0, sysctl_handle_uma_zone_frees, "QU", | zone, 0, sysctl_handle_uma_zone_frees, "QU", | ||||
"Total free calls"); | "Total free calls"); | ||||
SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"fails", CTLFLAG_RD, &zone->uz_fails, | "fails", CTLFLAG_RD, &zone->uz_fails, | ||||
"Number of allocation failures"); | "Number of allocation failures"); | ||||
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, | ||||
"xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0, | "xdomain", CTLFLAG_RD, &zone->uz_xdomain, | ||||
"Free calls from the wrong domain"); | "Free calls from the wrong domain"); | ||||
} | } | ||||
struct uma_zone_count { | struct uma_zone_count { | ||||
const char *name; | const char *name; | ||||
int count; | int count; | ||||
}; | }; | ||||
[... 30 lines elided ...]
* Arguments/Returns follow uma_ctor specifications | * Arguments/Returns follow uma_ctor specifications | ||||
* udata Actually uma_zctor_args | * udata Actually uma_zctor_args | ||||
*/ | */ | ||||
static int | static int | ||||
zone_ctor(void *mem, int size, void *udata, int flags) | zone_ctor(void *mem, int size, void *udata, int flags) | ||||
{ | { | ||||
struct uma_zone_count cnt; | struct uma_zone_count cnt; | ||||
struct uma_zctor_args *arg = udata; | struct uma_zctor_args *arg = udata; | ||||
uma_zone_domain_t zdom; | |||||
uma_zone_t zone = mem; | uma_zone_t zone = mem; | ||||
uma_zone_t z; | uma_zone_t z; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
int i; | int i; | ||||
bzero(zone, size); | bzero(zone, size); | ||||
zone->uz_name = arg->name; | zone->uz_name = arg->name; | ||||
zone->uz_ctor = arg->ctor; | zone->uz_ctor = arg->ctor; | ||||
zone->uz_dtor = arg->dtor; | zone->uz_dtor = arg->dtor; | ||||
zone->uz_init = NULL; | zone->uz_init = NULL; | ||||
zone->uz_fini = NULL; | zone->uz_fini = NULL; | ||||
zone->uz_sleeps = 0; | zone->uz_sleeps = 0; | ||||
zone->uz_xdomain = 0; | |||||
zone->uz_bucket_size = 0; | zone->uz_bucket_size = 0; | ||||
zone->uz_bucket_size_min = 0; | zone->uz_bucket_size_min = 0; | ||||
zone->uz_bucket_size_max = BUCKET_MAX; | zone->uz_bucket_size_max = BUCKET_MAX; | ||||
zone->uz_flags = (arg->flags & UMA_ZONE_SMR); | zone->uz_flags = (arg->flags & UMA_ZONE_SMR); | ||||
zone->uz_warning = NULL; | zone->uz_warning = NULL; | ||||
/* The domain structures follow the cpu structures. */ | /* The domain structures follow the cpu structures. */ | ||||
zone->uz_domain = | |||||
(struct uma_zone_domain *)&zone->uz_cpu[mp_maxid + 1]; | |||||
zone->uz_bkt_max = ULONG_MAX; | zone->uz_bkt_max = ULONG_MAX; | ||||
timevalclear(&zone->uz_ratecheck); | timevalclear(&zone->uz_ratecheck); | ||||
/* Count the number of duplicate names. */ | /* Count the number of duplicate names. */ | ||||
cnt.name = arg->name; | cnt.name = arg->name; | ||||
cnt.count = 0; | cnt.count = 0; | ||||
zone_foreach(zone_count, &cnt); | zone_foreach(zone_count, &cnt); | ||||
zone->uz_namecnt = cnt.count; | zone->uz_namecnt = cnt.count; | ||||
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); | |||||
ZONE_CROSS_LOCK_INIT(zone); | ZONE_CROSS_LOCK_INIT(zone); | ||||
for (i = 0; i < vm_ndomains; i++) | for (i = 0; i < vm_ndomains; i++) { | ||||
STAILQ_INIT(&zone->uz_domain[i].uzd_buckets); | zdom = ZDOM_GET(zone, i); | ||||
ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS)); | |||||
STAILQ_INIT(&zdom->uzd_buckets); | |||||
} | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (arg->uminit == trash_init && arg->fini == trash_fini) | if (arg->uminit == trash_init && arg->fini == trash_fini) | ||||
zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR; | zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR; | ||||
#endif | #endif | ||||
/* | /* | ||||
* This is a pure cache zone, no kegs. | * This is a pure cache zone, no kegs. | ||||
[... 135 lines elided ...]
* Arguments/Returns follow uma_dtor specifications | * Arguments/Returns follow uma_dtor specifications | ||||
* udata unused | * udata unused | ||||
*/ | */ | ||||
static void | static void | ||||
zone_dtor(void *arg, int size, void *udata) | zone_dtor(void *arg, int size, void *udata) | ||||
{ | { | ||||
uma_zone_t zone; | uma_zone_t zone; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
int i; | |||||
zone = (uma_zone_t)arg; | zone = (uma_zone_t)arg; | ||||
sysctl_remove_oid(zone->uz_oid, 1, 1); | sysctl_remove_oid(zone->uz_oid, 1, 1); | ||||
if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) | if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) | ||||
cache_drain(zone); | cache_drain(zone); | ||||
rw_wlock(&uma_rwlock); | rw_wlock(&uma_rwlock); | ||||
LIST_REMOVE(zone, uz_link); | LIST_REMOVE(zone, uz_link); | ||||
rw_wunlock(&uma_rwlock); | rw_wunlock(&uma_rwlock); | ||||
/* | |||||
* XXX there are some races here where | |||||
* the zone can be drained but zone lock | |||||
* released and then refilled before we | |||||
* remove it... we dont care for now | |||||
*/ | |||||
zone_reclaim(zone, M_WAITOK, true); | zone_reclaim(zone, M_WAITOK, true); | ||||
/* | /* | ||||
* We only destroy kegs from non secondary/non cache zones. | * We only destroy kegs from non secondary/non cache zones. | ||||
*/ | */ | ||||
if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { | if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { | ||||
keg = zone->uz_keg; | keg = zone->uz_keg; | ||||
rw_wlock(&uma_rwlock); | rw_wlock(&uma_rwlock); | ||||
LIST_REMOVE(keg, uk_link); | LIST_REMOVE(keg, uk_link); | ||||
rw_wunlock(&uma_rwlock); | rw_wunlock(&uma_rwlock); | ||||
zone_free_item(kegs, keg, NULL, SKIP_NONE); | zone_free_item(kegs, keg, NULL, SKIP_NONE); | ||||
} | } | ||||
counter_u64_free(zone->uz_allocs); | counter_u64_free(zone->uz_allocs); | ||||
counter_u64_free(zone->uz_frees); | counter_u64_free(zone->uz_frees); | ||||
counter_u64_free(zone->uz_fails); | counter_u64_free(zone->uz_fails); | ||||
counter_u64_free(zone->uz_xdomain); | |||||
free(zone->uz_ctlname, M_UMA); | free(zone->uz_ctlname, M_UMA); | ||||
ZONE_LOCK_FINI(zone); | for (i = 0; i < vm_ndomains; i++) | ||||
ZDOM_LOCK_FINI(ZDOM_GET(zone, i)); | |||||
ZONE_CROSS_LOCK_FINI(zone); | ZONE_CROSS_LOCK_FINI(zone); | ||||
} | } | ||||
static void | static void | ||||
zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg) | zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
uma_zone_t zone; | uma_zone_t zone; | ||||
[... 589 lines elided ...]
* A true return value indicates success and the caller should retry. | * A true return value indicates success and the caller should retry. | ||||
*/ | */ | ||||
static __noinline bool | static __noinline bool | ||||
cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) | cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
int domain; | int domain; | ||||
bool lockfail; | bool new; | ||||
CRITICAL_ASSERT(curthread); | CRITICAL_ASSERT(curthread); | ||||
/* | /* | ||||
* If we have run out of items in our alloc bucket see | * If we have run out of items in our alloc bucket see | ||||
* if we can switch with the free bucket. | * if we can switch with the free bucket. | ||||
* | * | ||||
* SMR Zones can't re-use the free bucket until the sequence has | * SMR Zones can't re-use the free bucket until the sequence has | ||||
* expired. | * expired. | ||||
*/ | */ | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && | if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 && | ||||
cache->uc_freebucket.ucb_cnt != 0) { | cache->uc_freebucket.ucb_cnt != 0) { | ||||
cache_bucket_swap(&cache->uc_freebucket, | cache_bucket_swap(&cache->uc_freebucket, | ||||
&cache->uc_allocbucket); | &cache->uc_allocbucket); | ||||
return (true); | return (true); | ||||
} | } | ||||
/* | /* | ||||
* Discard any empty allocation bucket while we hold no locks. | * Discard any empty allocation bucket while we hold no locks. | ||||
*/ | */ | ||||
bucket = cache_bucket_unload_alloc(cache); | bucket = cache_bucket_unload_alloc(cache); | ||||
domain = cache_domain(cache); | |||||
critical_exit(); | critical_exit(); | ||||
if (bucket != NULL) | |||||
if (bucket != NULL) { | |||||
KASSERT(bucket->ub_cnt == 0, | |||||
("cache_alloc: Entered with non-empty alloc bucket.")); | |||||
bucket_free(zone, bucket, udata); | bucket_free(zone, bucket, udata); | ||||
} | |||||
/* Short-circuit for zones without buckets and low memory. */ | /* Short-circuit for zones without buckets and low memory. */ | ||||
if (zone->uz_bucket_size == 0 || bucketdisable) { | if (zone->uz_bucket_size == 0 || bucketdisable) { | ||||
critical_enter(); | critical_enter(); | ||||
return (false); | return (false); | ||||
} | } | ||||
/* | /* | ||||
* The attempt to retrieve the item from the per-CPU cache has failed, so
* we must go back to the zone.  This requires the zdom lock, so we
* must drop the critical section, then re-acquire it when we go back | * must drop the critical section, then re-acquire it when we go back | ||||
* to the cache. Since the critical section is released, we may be | * to the cache. Since the critical section is released, we may be | ||||
* preempted or migrate. As such, make sure not to maintain any | * preempted or migrate. As such, make sure not to maintain any | ||||
* thread-local state specific to the cache from prior to releasing | * thread-local state specific to the cache from prior to releasing | ||||
* the critical section. | * the critical section. | ||||
*/ | */ | ||||
lockfail = 0; | bucket = cache_fetch_bucket(zone, cache, &domain); | ||||
if (ZONE_TRYLOCK(zone) == 0) { | if (bucket == NULL) { | ||||
/* Record contention to size the buckets. */ | |||||
ZONE_LOCK(zone); | |||||
lockfail = 1; | |||||
} | |||||
/* See if we lost the race to fill the cache. */ | |||||
critical_enter(); | |||||
cache = &zone->uz_cpu[curcpu]; | |||||
if (cache->uc_allocbucket.ucb_bucket != NULL) { | |||||
ZONE_UNLOCK(zone); | |||||
return (true); | |||||
} | |||||
/* | |||||
* Check the zone's cache of buckets. | |||||
*/ | |||||
if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) { | |||||
domain = PCPU_GET(domain); | |||||
zdom = &zone->uz_domain[domain]; | |||||
} else { | |||||
domain = UMA_ANYDOMAIN; | |||||
zdom = &zone->uz_domain[0]; | |||||
} | |||||
if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) { | |||||
KASSERT(bucket->ub_cnt != 0, | |||||
("uma_zalloc_arg: Returning an empty bucket.")); | |||||
cache_bucket_load_alloc(cache, bucket); | |||||
return (true); | |||||
} | |||||
/* We are no longer associated with this CPU. */ | |||||
critical_exit(); | |||||
/* | |||||
* We bump the uz count when the cache size is insufficient to | |||||
* handle the working set. | |||||
*/ | |||||
if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max) | |||||
zone->uz_bucket_size++; | |||||
ZONE_UNLOCK(zone); | |||||
/* | |||||
* Fill a bucket and attempt to use it as the alloc bucket. | |||||
*/ | |||||
bucket = zone_alloc_bucket(zone, udata, domain, flags); | bucket = zone_alloc_bucket(zone, udata, domain, flags); | ||||
new = true; | |||||
} else | |||||
new = false; | |||||
CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", | CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
if (bucket == NULL) { | if (bucket == NULL) { | ||||
critical_enter(); | critical_enter(); | ||||
return (false); | return (false); | ||||
} | } | ||||
/* | /* | ||||
* See if we lost the race or were migrated. Cache the | * See if we lost the race or were migrated. Cache the | ||||
* initialized bucket to make this less likely or claim | * initialized bucket to make this less likely or claim | ||||
* the memory directly. | * the memory directly. | ||||
*/ | */ | ||||
ZONE_LOCK(zone); | |||||
critical_enter(); | critical_enter(); | ||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
if (cache->uc_allocbucket.ucb_bucket == NULL && | if (cache->uc_allocbucket.ucb_bucket == NULL && | ||||
((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0 || | ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 || | ||||
domain == PCPU_GET(domain))) { | domain == PCPU_GET(domain))) { | ||||
if (new) { | |||||
zdom = ZDOM_GET(zone, domain); | |||||
atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt); | |||||
markj: So, the reason we do this here is because we might lose the race and put the bucket back in the cache. There are no other failure modes. Instead of waiting until you're certain of success, why can't we re-adjust imin when putting the bucket back in the cache after losing the race? This could cause the WSS estimate to be artificially inflated, but only temporarily, and it would simplify imin/imax management a lot. In fact, if you call zone_put_bucket() with ws = true instead of false, I think we'll get the desired result. You just need to handle uz_bkt_max. If imax/imin are reset while the lock is dropped, we will end up with a larger WSS estimate than we should, but that's a narrow race and the estimate decays anyway.
jeff (author): As discussed on irc; the issue is not the race but that we can call zone_alloc_bucket() now without ever having acquired a domain lock. This becomes increasingly important as we get more cores per domain.
} | |||||
cache_bucket_load_alloc(cache, bucket); | cache_bucket_load_alloc(cache, bucket); | ||||
zdom->uzd_imax += bucket->ub_cnt; | return (true); | ||||
} else if (zone->uz_bkt_count >= zone->uz_bkt_max) { | } | ||||
/* | |||||
* We lost the race, release this bucket and start over. | |||||
*/ | |||||
critical_exit(); | critical_exit(); | ||||
ZONE_UNLOCK(zone); | zone_put_bucket(zone, domain, bucket, udata, false); | ||||
bucket_drain(zone, bucket); | |||||
bucket_free(zone, bucket, udata); | |||||
critical_enter(); | critical_enter(); | ||||
return (true); | return (true); | ||||
} else | |||||
zone_put_bucket(zone, zdom, bucket, false); | |||||
ZONE_UNLOCK(zone); | |||||
return (true); | |||||
} | } | ||||
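For context, cache_alloc() sits behind the per-CPU fast path, and its true-means-retry contract mirrors cache_free() in uma_zfree_arg() below. A simplified sketch of the calling pattern (the allocation fast path itself is elided from this hunk; ctor and failure handling omitted):

	critical_enter();
	do {
		cache = &zone->uz_cpu[curcpu];
		bucket = &cache->uc_allocbucket;
		if (__predict_true(bucket->ucb_cnt != 0)) {
			item = cache_bucket_pop(cache, bucket);
			critical_exit();
			return (item);
		}
	} while (cache_alloc(zone, cache, udata, flags));
	critical_exit();
	/* Bucketing failed; fall back to an internal item allocation. */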
void * | void * | ||||
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) | uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) | ||||
{ | { | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
[... 375 lines elided ...]
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
int maxbucket, cnt; | int maxbucket, cnt; | ||||
CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name, | CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name, | ||||
zone, domain); | zone, domain); | ||||
/* Avoid allocs targeting empty domains. */ | /* Avoid allocs targeting empty domains. */ | ||||
if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) | if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) | ||||
domain = UMA_ANYDOMAIN; | domain = UMA_ANYDOMAIN; | ||||
if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0) | |||||
domain = UMA_ANYDOMAIN; | |||||
if (zone->uz_max_items > 0) | if (zone->uz_max_items > 0) | ||||
maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size, | maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size, | ||||
M_NOWAIT); | M_NOWAIT); | ||||
else | else | ||||
maxbucket = zone->uz_bucket_size; | maxbucket = zone->uz_bucket_size; | ||||
if (maxbucket == 0) | if (maxbucket == 0) | ||||
return (false); | return (false); | ||||
[... 110 lines elided ...]
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zfree_smr(uma_zone_t zone, void *item) | uma_zfree_smr(uma_zone_t zone, void *item) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_cache_bucket_t bucket; | uma_cache_bucket_t bucket; | ||||
int domain, itemdomain, uz_flags; | int itemdomain, uz_flags; | ||||
#ifdef UMA_ZALLOC_DEBUG | #ifdef UMA_ZALLOC_DEBUG | ||||
KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, | KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, | ||||
("uma_zfree_smr: called with non-SMR zone.\n")); | ("uma_zfree_smr: called with non-SMR zone.\n")); | ||||
KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer.")); | KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer.")); | ||||
if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN) | if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN) | ||||
return; | return; | ||||
#endif | #endif | ||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
uz_flags = cache_uz_flags(cache); | uz_flags = cache_uz_flags(cache); | ||||
domain = itemdomain = 0; | itemdomain = 0; | ||||
#ifdef NUMA | #ifdef NUMA | ||||
if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) | if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) | ||||
itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | ||||
#endif | #endif | ||||
critical_enter(); | critical_enter(); | ||||
do { | do { | ||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
/* SMR Zones must free to the free bucket. */ | /* SMR Zones must free to the free bucket. */ | ||||
bucket = &cache->uc_freebucket; | bucket = &cache->uc_freebucket; | ||||
#ifdef NUMA | #ifdef NUMA | ||||
domain = PCPU_GET(domain); | |||||
if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && | if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && | ||||
domain != itemdomain) { | PCPU_GET(domain) != itemdomain) { | ||||
bucket = &cache->uc_crossbucket; | bucket = &cache->uc_crossbucket; | ||||
} | } | ||||
#endif | #endif | ||||
if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { | if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { | ||||
cache_bucket_push(cache, bucket, item); | cache_bucket_push(cache, bucket, item); | ||||
critical_exit(); | critical_exit(); | ||||
return; | return; | ||||
} | } | ||||
} while (cache_free(zone, cache, NULL, item, itemdomain)); | } while (cache_free(zone, cache, NULL, item, itemdomain)); | ||||
critical_exit(); | critical_exit(); | ||||
/* | /* | ||||
* If nothing else caught this, we'll just do an internal free. | * If nothing else caught this, we'll just do an internal free. | ||||
*/ | */ | ||||
zone_free_item(zone, item, NULL, SKIP_NONE); | zone_free_item(zone, item, NULL, SKIP_NONE); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zfree_arg(uma_zone_t zone, void *item, void *udata) | uma_zfree_arg(uma_zone_t zone, void *item, void *udata) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_cache_bucket_t bucket; | uma_cache_bucket_t bucket; | ||||
int domain, itemdomain, uz_flags; | int itemdomain, uz_flags; | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone); | CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone); | ||||
#ifdef UMA_ZALLOC_DEBUG | #ifdef UMA_ZALLOC_DEBUG | ||||
KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, | KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, | ||||
[... 31 lines elided ...]
#endif
* accessing the cache must not be preempted or yield during access, | * accessing the cache must not be preempted or yield during access, | ||||
* and (2) the thread must not migrate CPUs without switching which | * and (2) the thread must not migrate CPUs without switching which | ||||
* cache it accesses. We rely on a critical section to prevent | * cache it accesses. We rely on a critical section to prevent | ||||
* preemption and migration. We release the critical section in | * preemption and migration. We release the critical section in | ||||
* order to acquire the zone mutex if we are unable to free to the | * order to acquire the zone mutex if we are unable to free to the | ||||
* current cache; when we re-acquire the critical section, we must | * current cache; when we re-acquire the critical section, we must | ||||
* detect and handle migration if it has occurred. | * detect and handle migration if it has occurred. | ||||
*/ | */ | ||||
domain = itemdomain = 0; | itemdomain = 0; | ||||
#ifdef NUMA | #ifdef NUMA | ||||
if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) | if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) | ||||
itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | ||||
#endif | #endif | ||||
critical_enter(); | critical_enter(); | ||||
do { | do { | ||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
/* | /* | ||||
* Try to free into the allocbucket first to give LIFO | * Try to free into the allocbucket first to give LIFO | ||||
* ordering for cache-hot data structures. Spill over
* into the freebucket if necessary. Alloc will swap | * into the freebucket if necessary. Alloc will swap | ||||
* them if one runs dry. | * them if one runs dry. | ||||
*/ | */ | ||||
bucket = &cache->uc_allocbucket; | bucket = &cache->uc_allocbucket; | ||||
#ifdef NUMA | #ifdef NUMA | ||||
domain = PCPU_GET(domain); | |||||
if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && | if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && | ||||
domain != itemdomain) { | PCPU_GET(domain) != itemdomain) { | ||||
bucket = &cache->uc_crossbucket; | bucket = &cache->uc_crossbucket; | ||||
} else | } else | ||||
#endif | #endif | ||||
if (bucket->ucb_cnt >= bucket->ucb_entries) | if (bucket->ucb_cnt >= bucket->ucb_entries) | ||||
bucket = &cache->uc_freebucket; | bucket = &cache->uc_freebucket; | ||||
if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { | if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { | ||||
cache_bucket_push(cache, bucket, item); | cache_bucket_push(cache, bucket, item); | ||||
critical_exit(); | critical_exit(); | ||||
[... 42 lines elided ...]
zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
* the current smr seq rather than accepting the bucket's. | * the current smr seq rather than accepting the bucket's. | ||||
*/ | */ | ||||
seq = SMR_SEQ_INVALID; | seq = SMR_SEQ_INVALID; | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) != 0) | if ((zone->uz_flags & UMA_ZONE_SMR) != 0) | ||||
seq = smr_current(zone->uz_smr); | seq = smr_current(zone->uz_smr); | ||||
while (bucket->ub_cnt > 0) { | while (bucket->ub_cnt > 0) { | ||||
item = bucket->ub_bucket[bucket->ub_cnt - 1]; | item = bucket->ub_bucket[bucket->ub_cnt - 1]; | ||||
domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); | ||||
zdom = &zone->uz_domain[domain]; | zdom = ZDOM_GET(zone, domain); | ||||
if (zdom->uzd_cross == NULL) { | if (zdom->uzd_cross == NULL) { | ||||
zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT); | zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT); | ||||
if (zdom->uzd_cross == NULL) | if (zdom->uzd_cross == NULL) | ||||
break; | break; | ||||
} | } | ||||
b = zdom->uzd_cross; | b = zdom->uzd_cross; | ||||
b->ub_bucket[b->ub_cnt++] = item; | b->ub_bucket[b->ub_cnt++] = item; | ||||
b->ub_seq = seq; | b->ub_seq = seq; | ||||
if (b->ub_cnt == b->ub_entries) { | if (b->ub_cnt == b->ub_entries) { | ||||
STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link); | STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link); | ||||
zdom->uzd_cross = NULL; | zdom->uzd_cross = NULL; | ||||
} | } | ||||
bucket->ub_cnt--; | bucket->ub_cnt--; | ||||
} | } | ||||
ZONE_CROSS_UNLOCK(zone); | ZONE_CROSS_UNLOCK(zone); | ||||
if (!STAILQ_EMPTY(&fullbuckets)) { | if (bucket->ub_cnt == 0) | ||||
ZONE_LOCK(zone); | bucket->ub_seq = SMR_SEQ_INVALID; | ||||
bucket_free(zone, bucket, udata); | |||||
while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) { | while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) { | ||||
STAILQ_REMOVE_HEAD(&fullbuckets, ub_link); | STAILQ_REMOVE_HEAD(&fullbuckets, ub_link); | ||||
if (zone->uz_bkt_count >= zone->uz_bkt_max) { | domain = _vm_phys_domain(pmap_kextract( | ||||
ZONE_UNLOCK(zone); | |||||
bucket_drain(zone, b); | |||||
bucket_free(zone, b, udata); | |||||
ZONE_LOCK(zone); | |||||
} else { | |||||
domain = _vm_phys_domain( | |||||
pmap_kextract( | |||||
(vm_offset_t)b->ub_bucket[0])); | (vm_offset_t)b->ub_bucket[0])); | ||||
zdom = &zone->uz_domain[domain]; | zone_put_bucket(zone, domain, b, udata, true); | ||||
zone_put_bucket(zone, zdom, b, true); | |||||
} | } | ||||
} | } | ||||
ZONE_UNLOCK(zone); | |||||
} | |||||
if (bucket->ub_cnt != 0) | |||||
bucket_drain(zone, bucket); | |||||
bucket->ub_seq = SMR_SEQ_INVALID; | |||||
bucket_free(zone, bucket, udata); | |||||
} | |||||
#endif | #endif | ||||
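Note: zone_free_cross() stages full buckets on a local STAILQ while holding the cross lock and publishes them with zone_put_bucket() only after ZONE_CROSS_UNLOCK(), which presumably keeps the cross lock out of the per-domain lock ordering and bounds its hold time.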
static void | static void | ||||
zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata, | zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata, | ||||
int domain, int itemdomain) | int domain, int itemdomain) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | |||||
#ifdef NUMA | #ifdef NUMA | ||||
/* | /* | ||||
* On a two-domain system, a bucket coming from the wrong domain | * On a two-domain system, a bucket coming from the wrong domain | ||||
* must consist entirely of items from the only other domain, so | * must consist entirely of items from the only other domain, so | ||||
* we can simply cache it there. With more domains the bucket may | * we can simply cache it there. With more domains the bucket may | ||||
* be mixed and we need to sort the items back to their correct | * be mixed and we need to sort the items back to their correct | ||||
* domains. | * domains. | ||||
*/ | */ | ||||
if (domain != itemdomain && vm_ndomains > 2) { | if (domain != itemdomain && vm_ndomains > 2) { | ||||
zone_free_cross(zone, bucket, udata); | zone_free_cross(zone, bucket, udata); | ||||
return; | return; | ||||
} | } | ||||
#endif | #endif | ||||
/* | /* | ||||
* Attempt to save the bucket in the zone's domain bucket cache. | * Attempt to save the bucket in the zone's domain bucket cache. | ||||
* | |||||
* We bump the uz count when the cache size is insufficient to | |||||
* handle the working set. | |||||
*/ | */ | ||||
if (ZONE_TRYLOCK(zone) == 0) { | |||||
/* Record contention to size the buckets. */ | |||||
ZONE_LOCK(zone); | |||||
if (zone->uz_bucket_size < zone->uz_bucket_size_max) | |||||
zone->uz_bucket_size++; | |||||
} | |||||
CTR3(KTR_UMA, | CTR3(KTR_UMA, | ||||
"uma_zfree: zone %s(%p) putting bucket %p on free list", | "uma_zfree: zone %s(%p) putting bucket %p on free list", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
/* ub_cnt is pointing to the last free item */ | /* ub_cnt is pointing to the last free item */ | ||||
KASSERT(bucket->ub_cnt == bucket->ub_entries, | KASSERT(bucket->ub_cnt == bucket->ub_entries, | ||||
("uma_zfree: Attempting to insert partial bucket onto the full list.\n")); | ("uma_zfree: Attempting to insert partial bucket onto the full list.\n")); | ||||
if (zone->uz_bkt_count >= zone->uz_bkt_max) { | if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0) | ||||
ZONE_UNLOCK(zone); | itemdomain = zone_domain_lowest(zone, itemdomain); | ||||
bucket_drain(zone, bucket); | zone_put_bucket(zone, itemdomain, bucket, udata, true); | ||||
bucket_free(zone, bucket, udata); | |||||
} else { | |||||
zdom = &zone->uz_domain[itemdomain]; | |||||
zone_put_bucket(zone, zdom, bucket, true); | |||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
} | |||||
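Note: the ROUNDROBIN path above defers placement to zone_domain_lowest(). An illustrative guess at such a least-loaded policy, assuming only the per-domain uzd_nitems counter (this is a sketch, not the patch's implementation):

	static int
	lowest_domain_sketch(uma_zone_t zone, int pref)
	{
		long least, nitems;
		int domain, i;

		least = LONG_MAX;
		domain = pref;
		for (i = 0; i < vm_ndomains; i++) {
			nitems = ZDOM_GET(zone, i)->uzd_nitems;
			if (nitems < least) {
				least = nitems;
				domain = i;
			} else if (nitems == least && i == pref)
				domain = i;	/* prefer "pref" on ties */
		}
		return (domain);
	}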
/* | /* | ||||
* Populate a free or cross bucket for the current cpu cache. Free any | * Populate a free or cross bucket for the current cpu cache. Free any | ||||
* existing full bucket either to the zone cache or back to the slab layer. | * existing full bucket either to the zone cache or back to the slab layer. | ||||
* | * | ||||
* Enters and returns in a critical section. A false return indicates | * Enters and returns in a critical section. A false return indicates | ||||
* that we cannot satisfy this free in the cache layer; a true return | * that we cannot satisfy this free in the cache layer; a true return | ||||
* indicates that the caller should retry. | * indicates that the caller should retry. | ||||
Show All 15 Lines | cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item, | ||||
newbucket = NULL; | newbucket = NULL; | ||||
/* | /* | ||||
* FIRSTTOUCH zones need to free to the correct zdom; when FIRSTTOUCH | * FIRSTTOUCH zones need to free to the correct zdom; when FIRSTTOUCH | ||||
* is enabled this is the zdom of the item. The bucket is the cross | * is enabled this is the zdom of the item. The bucket is the cross | ||||
* bucket if the current domain and itemdomain do not match. | * bucket if the current domain and itemdomain do not match. | ||||
*/ | */ | ||||
cbucket = &cache->uc_freebucket; | cbucket = &cache->uc_freebucket; | ||||
domain = cache_domain(cache); | |||||
#ifdef NUMA | #ifdef NUMA | ||||
if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) { | if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) { | ||||
domain = PCPU_GET(domain); | |||||
if (domain != itemdomain) { | if (domain != itemdomain) { | ||||
cbucket = &cache->uc_crossbucket; | cbucket = &cache->uc_crossbucket; | ||||
if (cbucket->ucb_cnt != 0) | if (cbucket->ucb_cnt != 0) | ||||
atomic_add_64(&zone->uz_xdomain, | counter_u64_add(zone->uz_xdomain, | ||||
cbucket->ucb_cnt); | cbucket->ucb_cnt); | ||||
} | } | ||||
} else | } else | ||||
#endif | #endif | ||||
itemdomain = domain = 0; | itemdomain = domain; | ||||
bucket = cache_bucket_unload(cbucket); | bucket = cache_bucket_unload(cbucket); | ||||
KASSERT(bucket == NULL || bucket->ub_cnt != 0, | |||||
("cache_free: Entered with empty free bucket.")); | |||||
/* We are no longer associated with this CPU. */ | /* We are no longer associated with this CPU. */ | ||||
critical_exit(); | critical_exit(); | ||||
/* | /* | ||||
* Don't let SMR zones operate without a free bucket. Force | * Don't let SMR zones operate without a free bucket. Force | ||||
* a synchronize and re-use this one. We will only degrade | * a synchronize and re-use this one. We will only degrade | ||||
* to a synchronize every bucket_size items rather than every | * to a synchronize every bucket_size items rather than every | ||||
Show All 19 Lines | if ((bucket = newbucket) == NULL) | ||||
return (false); | return (false); | ||||
cache = &zone->uz_cpu[curcpu]; | cache = &zone->uz_cpu[curcpu]; | ||||
#ifdef NUMA | #ifdef NUMA | ||||
/* | /* | ||||
* Check to see if we should be populating the cross bucket. If it | * Check to see if we should be populating the cross bucket. If it | ||||
* is already populated we will fall through and attempt to populate | * is already populated we will fall through and attempt to populate | ||||
* the free bucket. | * the free bucket. | ||||
*/ | */ | ||||
if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) { | if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) { | ||||
domain = PCPU_GET(domain); | domain = PCPU_GET(domain); | ||||
if (domain != itemdomain && | if (domain != itemdomain && | ||||
cache->uc_crossbucket.ucb_bucket == NULL) { | cache->uc_crossbucket.ucb_bucket == NULL) { | ||||
cache_bucket_load_cross(cache, bucket); | cache_bucket_load_cross(cache, bucket); | ||||
return (true); | return (true); | ||||
} | } | ||||
} | } | ||||
#endif | #endif | ||||
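Note: an illustrative fragment of the SMR rule mentioned earlier in this function, assuming smr(9)'s smr_synchronize(); the elided code may handle this differently. A bucket with a live ub_seq may not be reused until readers have advanced past that sequence:

	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		smr_synchronize(zone->uz_smr);	/* wait out current readers */
		bucket->ub_seq = SMR_SEQ_INVALID; /* items now safe to reuse */
	}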
▲ Show 20 Lines • Show All 178 Lines • ▼ Show 20 Lines | if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) | ||||
bpcpu++; | bpcpu++; | ||||
nitems -= ubz->ubz_entries * bpcpu * mp_ncpus; | nitems -= ubz->ubz_entries * bpcpu * mp_ncpus; | ||||
zone->uz_bucket_size_max = ubz->ubz_entries; | zone->uz_bucket_size_max = ubz->ubz_entries; | ||||
} else { | } else { | ||||
zone->uz_bucket_size_max = zone->uz_bucket_size = 0; | zone->uz_bucket_size_max = zone->uz_bucket_size = 0; | ||||
} | } | ||||
if (zone->uz_bucket_size_min > zone->uz_bucket_size_max) | if (zone->uz_bucket_size_min > zone->uz_bucket_size_max) | ||||
zone->uz_bucket_size_min = zone->uz_bucket_size_max; | zone->uz_bucket_size_min = zone->uz_bucket_size_max; | ||||
zone->uz_bkt_max = nitems; | zone->uz_bkt_max = nitems / vm_ndomains; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
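Note: uz_bkt_max is now divided by vm_ndomains because the bucket cache is kept per domain: for example, if nitems works out to 1024 cached items on a four-domain machine, each per-domain cache is capped at 256 items.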
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_get_max(uma_zone_t zone) | uma_zone_get_max(uma_zone_t zone) | ||||
{ | { | ||||
int nitems; | int nitems; | ||||
▲ Show 20 Lines • Show All 197 Lines • ▼ Show 20 Lines | #else | ||||
if (1) { | if (1) { | ||||
#endif | #endif | ||||
kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | ||||
if (kva == 0) | if (kva == 0) | ||||
return (0); | return (0); | ||||
} else | } else | ||||
kva = 0; | kva = 0; | ||||
ZONE_LOCK(zone); | |||||
MPASS(keg->uk_kva == 0); | MPASS(keg->uk_kva == 0); | ||||
keg->uk_kva = kva; | keg->uk_kva = kva; | ||||
keg->uk_offset = 0; | keg->uk_offset = 0; | ||||
zone->uz_max_items = pages * keg->uk_ipers; | zone->uz_max_items = pages * keg->uk_ipers; | ||||
#ifdef UMA_MD_SMALL_ALLOC | #ifdef UMA_MD_SMALL_ALLOC | ||||
keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | ||||
#else | #else | ||||
keg->uk_allocf = noobj_alloc; | keg->uk_allocf = noobj_alloc; | ||||
#endif | #endif | ||||
keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; | keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; | ||||
zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; | zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; | ||||
zone_update_caches(zone); | zone_update_caches(zone); | ||||
ZONE_UNLOCK(zone); | |||||
return (1); | return (1); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_prealloc(uma_zone_t zone, int items) | uma_prealloc(uma_zone_t zone, int items) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 190 Lines • ▼ Show 20 Lines | CPU_FOREACH(cpu) { | ||||
cachefree += cache->uc_freebucket.ucb_cnt; | cachefree += cache->uc_freebucket.ucb_cnt; | ||||
xdomain += cache->uc_crossbucket.ucb_cnt; | xdomain += cache->uc_crossbucket.ucb_cnt; | ||||
cachefree += cache->uc_crossbucket.ucb_cnt; | cachefree += cache->uc_crossbucket.ucb_cnt; | ||||
allocs += cache->uc_allocs; | allocs += cache->uc_allocs; | ||||
frees += cache->uc_frees; | frees += cache->uc_frees; | ||||
} | } | ||||
allocs += counter_u64_fetch(z->uz_allocs); | allocs += counter_u64_fetch(z->uz_allocs); | ||||
frees += counter_u64_fetch(z->uz_frees); | frees += counter_u64_fetch(z->uz_frees); | ||||
xdomain += counter_u64_fetch(z->uz_xdomain); | |||||
sleeps += z->uz_sleeps; | sleeps += z->uz_sleeps; | ||||
xdomain += z->uz_xdomain; | |||||
if (cachefreep != NULL) | if (cachefreep != NULL) | ||||
*cachefreep = cachefree; | *cachefreep = cachefree; | ||||
if (allocsp != NULL) | if (allocsp != NULL) | ||||
*allocsp = allocs; | *allocsp = allocs; | ||||
if (freesp != NULL) | if (freesp != NULL) | ||||
*freesp = frees; | *freesp = frees; | ||||
if (sleepsp != NULL) | if (sleepsp != NULL) | ||||
*sleepsp = sleeps; | *sleepsp = sleeps; | ||||
Show All 27 Lines | uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf, | ||||
struct uma_percpu_stat *ups, bool internal) | struct uma_percpu_stat *ups, bool internal) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
int i; | int i; | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
zdom = &z->uz_domain[i]; | zdom = ZDOM_GET(z, i); | ||||
uth->uth_zone_free += zdom->uzd_nitems; | uth->uth_zone_free += zdom->uzd_nitems; | ||||
} | } | ||||
uth->uth_allocs = counter_u64_fetch(z->uz_allocs); | uth->uth_allocs = counter_u64_fetch(z->uz_allocs); | ||||
uth->uth_frees = counter_u64_fetch(z->uz_frees); | uth->uth_frees = counter_u64_fetch(z->uz_frees); | ||||
uth->uth_fails = counter_u64_fetch(z->uz_fails); | uth->uth_fails = counter_u64_fetch(z->uz_fails); | ||||
uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain); | |||||
uth->uth_sleeps = z->uz_sleeps; | uth->uth_sleeps = z->uz_sleeps; | ||||
uth->uth_xdomain = z->uz_xdomain; | |||||
/* | |||||
* While it is not normally safe to access the cache bucket pointers | |||||
* while not on the CPU that owns the cache, we only allow the pointers | |||||
* to be exchanged without the zone lock held, not invalidated, so | |||||
* accept the possible race associated with bucket exchange during | |||||
* monitoring. Use atomic_load_ptr() to ensure that the bucket pointers | |||||
* are loaded only once. | |||||
*/ | |||||
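Note: an illustrative instance of the single-load pattern this comment describes, using atomic(9)'s atomic_load_ptr() (a sketch; the loop below reads the embedded bucket counts directly):

	uma_bucket_t b;

	b = atomic_load_ptr(&cache->uc_allocbucket.ucb_bucket);
	if (b != NULL)	/* the snapshot stays valid across a bucket swap */
		ups[i].ups_cache_free += b->ub_cnt;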
for (i = 0; i < mp_maxid + 1; i++) { | for (i = 0; i < mp_maxid + 1; i++) { | ||||
bzero(&ups[i], sizeof(*ups)); | bzero(&ups[i], sizeof(*ups)); | ||||
if (internal || CPU_ABSENT(i)) | if (internal || CPU_ABSENT(i)) | ||||
continue; | continue; | ||||
cache = &z->uz_cpu[i]; | cache = &z->uz_cpu[i]; | ||||
ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt; | ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt; | ||||
ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt; | ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt; | ||||
ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt; | ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt; | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
LIST_FOREACH(kz, &uma_kegs, uk_link) { | LIST_FOREACH(kz, &uma_kegs, uk_link) { | ||||
kfree = pages = 0; | kfree = pages = 0; | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
kfree += kz->uk_domain[i].ud_free_items; | kfree += kz->uk_domain[i].ud_free_items; | ||||
pages += kz->uk_domain[i].ud_pages; | pages += kz->uk_domain[i].ud_pages; | ||||
} | } | ||||
LIST_FOREACH(z, &kz->uk_zones, uz_link) { | LIST_FOREACH(z, &kz->uk_zones, uz_link) { | ||||
bzero(&uth, sizeof(uth)); | bzero(&uth, sizeof(uth)); | ||||
ZONE_LOCK(z); | |||||
strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | ||||
uth.uth_align = kz->uk_align; | uth.uth_align = kz->uk_align; | ||||
uth.uth_size = kz->uk_size; | uth.uth_size = kz->uk_size; | ||||
uth.uth_rsize = kz->uk_rsize; | uth.uth_rsize = kz->uk_rsize; | ||||
if (z->uz_max_items > 0) { | if (z->uz_max_items > 0) { | ||||
items = UZ_ITEMS_COUNT(z->uz_items); | items = UZ_ITEMS_COUNT(z->uz_items); | ||||
uth.uth_pages = (items / kz->uk_ipers) * | uth.uth_pages = (items / kz->uk_ipers) * | ||||
kz->uk_ppera; | kz->uk_ppera; | ||||
} else | } else | ||||
uth.uth_pages = pages; | uth.uth_pages = pages; | ||||
uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) * | uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) * | ||||
kz->uk_ppera; | kz->uk_ppera; | ||||
uth.uth_limit = z->uz_max_items; | uth.uth_limit = z->uz_max_items; | ||||
uth.uth_keg_free = kfree; | uth.uth_keg_free = kfree; | ||||
/* | /* | ||||
* A zone is secondary if it is not the first entry | * A zone is secondary if it is not the first entry | ||||
* on the keg's zone list. | * on the keg's zone list. | ||||
*/ | */ | ||||
if ((z->uz_flags & UMA_ZONE_SECONDARY) && | if ((z->uz_flags & UMA_ZONE_SECONDARY) && | ||||
(LIST_FIRST(&kz->uk_zones) != z)) | (LIST_FIRST(&kz->uk_zones) != z)) | ||||
uth.uth_zone_flags = UTH_ZONE_SECONDARY; | uth.uth_zone_flags = UTH_ZONE_SECONDARY; | ||||
uma_vm_zone_stats(&uth, z, &sbuf, ups, | uma_vm_zone_stats(&uth, z, &sbuf, ups, | ||||
kz->uk_flags & UMA_ZFLAG_INTERNAL); | kz->uk_flags & UMA_ZFLAG_INTERNAL); | ||||
ZONE_UNLOCK(z); | |||||
(void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); | (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); | ||||
for (i = 0; i < mp_maxid + 1; i++) | for (i = 0; i < mp_maxid + 1; i++) | ||||
(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); | (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); | ||||
} | } | ||||
} | } | ||||
LIST_FOREACH(z, &uma_cachezones, uz_link) { | LIST_FOREACH(z, &uma_cachezones, uz_link) { | ||||
bzero(&uth, sizeof(uth)); | bzero(&uth, sizeof(uth)); | ||||
ZONE_LOCK(z); | |||||
strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | ||||
uth.uth_size = z->uz_size; | uth.uth_size = z->uz_size; | ||||
uma_vm_zone_stats(&uth, z, &sbuf, ups, false); | uma_vm_zone_stats(&uth, z, &sbuf, ups, false); | ||||
ZONE_UNLOCK(z); | |||||
(void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); | (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); | ||||
for (i = 0; i < mp_maxid + 1; i++) | for (i = 0; i < mp_maxid + 1; i++) | ||||
(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); | (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); | ||||
} | } | ||||
rw_runlock(&uma_rwlock); | rw_runlock(&uma_rwlock); | ||||
error = sbuf_finish(&sbuf); | error = sbuf_finish(&sbuf); | ||||
sbuf_delete(&sbuf); | sbuf_delete(&sbuf); | ||||
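Note: the handler follows the usual sbuf(9) shape; a minimal sketch, assuming the elided opening uses sbuf_new_for_sysctl() (the variant that drains to the sysctl request):

	struct sbuf sbuf;

	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);	/* stream to the request */
	(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));	/* append binary records */
	error = sbuf_finish(&sbuf);			/* flush, return drain status */
	sbuf_delete(&sbuf);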
▲ Show 20 Lines • Show All 242 Lines • ▼ Show 20 Lines | if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { | ||||
frees = counter_u64_fetch(z->uz_frees); | frees = counter_u64_fetch(z->uz_frees); | ||||
*sleeps = z->uz_sleeps; | *sleeps = z->uz_sleeps; | ||||
*cachefree = 0; | *cachefree = 0; | ||||
*xdomain = 0; | *xdomain = 0; | ||||
} else | } else | ||||
uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps, | uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps, | ||||
xdomain); | xdomain); | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
*cachefree += z->uz_domain[i].uzd_nitems; | *cachefree += ZDOM_GET(z, i)->uzd_nitems; | ||||
if (!((z->uz_flags & UMA_ZONE_SECONDARY) && | if (!((z->uz_flags & UMA_ZONE_SECONDARY) && | ||||
(LIST_FIRST(&kz->uk_zones) != z))) | (LIST_FIRST(&kz->uk_zones) != z))) | ||||
*cachefree += kz->uk_domain[i].ud_free_items; | *cachefree += kz->uk_domain[i].ud_free_items; | ||||
} | } | ||||
*used = *allocs - frees; | *used = *allocs - frees; | ||||
return (((int64_t)*used + *cachefree) * kz->uk_size); | return (((int64_t)*used + *cachefree) * kz->uk_size); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | DB_SHOW_COMMAND(umacache, db_show_umacache) | ||||
long cachefree; | long cachefree; | ||||
int i; | int i; | ||||
db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", | db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", | ||||
"Requests", "Bucket"); | "Requests", "Bucket"); | ||||
LIST_FOREACH(z, &uma_cachezones, uz_link) { | LIST_FOREACH(z, &uma_cachezones, uz_link) { | ||||
uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL); | uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL); | ||||
for (i = 0; i < vm_ndomains; i++) | for (i = 0; i < vm_ndomains; i++) | ||||
cachefree += z->uz_domain[i].uzd_nitems; | cachefree += ZDOM_GET(z, i)->uzd_nitems; | ||||
db_printf("%18s %8ju %8jd %8ld %12ju %8u\n", | db_printf("%18s %8ju %8jd %8ld %12ju %8u\n", | ||||
z->uz_name, (uintmax_t)z->uz_size, | z->uz_name, (uintmax_t)z->uz_size, | ||||
(intmax_t)(allocs - frees), cachefree, | (intmax_t)(allocs - frees), cachefree, | ||||
(uintmax_t)allocs, z->uz_bucket_size); | (uintmax_t)allocs, z->uz_bucket_size); | ||||
if (db_pager_quit) | if (db_pager_quit) | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
#endif /* DDB */ | #endif /* DDB */ |