Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/uma_core.c
Show First 20 Lines • Show All 136 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Pointer and counter to pool of pages, that is preallocated at | * Pointer and counter to pool of pages, that is preallocated at | ||||
* startup to bootstrap UMA. | * startup to bootstrap UMA. | ||||
*/ | */ | ||||
static char *bootmem; | static char *bootmem; | ||||
static int boot_pages; | static int boot_pages; | ||||
static struct sx uma_drain_lock; | static struct sx uma_reclaim_lock; | ||||
/* | /* | ||||
* kmem soft limit, initialized by uma_set_limit(). Ensure that early | * kmem soft limit, initialized by uma_set_limit(). Ensure that early | ||||
* allocations don't trigger a wakeup of the reclaim thread. | * allocations don't trigger a wakeup of the reclaim thread. | ||||
*/ | */ | ||||
static unsigned long uma_kmem_limit = LONG_MAX; | static unsigned long uma_kmem_limit = LONG_MAX; | ||||
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0, | SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0, | ||||
"UMA kernel memory soft limit"); | "UMA kernel memory soft limit"); | ||||
▲ Show 20 Lines • Show All 92 Lines • ▼ Show 20 Lines | |||||
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | ||||
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | ||||
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); | ||||
static void page_free(void *, vm_size_t, uint8_t); | static void page_free(void *, vm_size_t, uint8_t); | ||||
static void pcpu_page_free(void *, vm_size_t, uint8_t); | static void pcpu_page_free(void *, vm_size_t, uint8_t); | ||||
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int); | static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int); | ||||
static void cache_drain(uma_zone_t); | static void cache_drain(uma_zone_t); | ||||
static void bucket_drain(uma_zone_t, uma_bucket_t); | static void bucket_drain(uma_zone_t, uma_bucket_t); | ||||
static void bucket_cache_drain(uma_zone_t zone); | static void bucket_cache_reclaim(uma_zone_t zone, bool); | ||||
static int keg_ctor(void *, int, void *, int); | static int keg_ctor(void *, int, void *, int); | ||||
static void keg_dtor(void *, int, void *); | static void keg_dtor(void *, int, void *); | ||||
static int zone_ctor(void *, int, void *, int); | static int zone_ctor(void *, int, void *, int); | ||||
static void zone_dtor(void *, int, void *); | static void zone_dtor(void *, int, void *); | ||||
static int zero_init(void *, int, int); | static int zero_init(void *, int, int); | ||||
static void keg_small_init(uma_keg_t keg); | static void keg_small_init(uma_keg_t keg); | ||||
static void keg_large_init(uma_keg_t keg); | static void keg_large_init(uma_keg_t keg); | ||||
static void zone_foreach(void (*zfunc)(uma_zone_t)); | static void zone_foreach(void (*zfunc)(uma_zone_t)); | ||||
▲ Show 20 Lines • Show All 200 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static void | static void | ||||
bucket_zone_drain(void) | bucket_zone_drain(void) | ||||
{ | { | ||||
struct uma_bucket_zone *ubz; | struct uma_bucket_zone *ubz; | ||||
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) | for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) | ||||
zone_drain(ubz->ubz_zone); | uma_zreclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN); | ||||
} | } | ||||
/* | |||||
* Attempt to satisfy an allocation by retrieving a full bucket from one of the | |||||
* zone's caches. | |||||
*/ | |||||
static uma_bucket_t | static uma_bucket_t | ||||
zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws) | zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom) | ||||
{ | { | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
ZONE_LOCK_ASSERT(zone); | ZONE_LOCK_ASSERT(zone); | ||||
if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { | if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) { | ||||
MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | ||||
LIST_REMOVE(bucket, ub_link); | TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link); | ||||
zdom->uzd_nitems -= bucket->ub_cnt; | zdom->uzd_nitems -= bucket->ub_cnt; | ||||
if (ws && zdom->uzd_imin > zdom->uzd_nitems) | if (zdom->uzd_imin > zdom->uzd_nitems) | ||||
zdom->uzd_imin = zdom->uzd_nitems; | zdom->uzd_imin = zdom->uzd_nitems; | ||||
zone->uz_bkt_count -= bucket->ub_cnt; | zone->uz_bkt_count -= bucket->ub_cnt; | ||||
} | } | ||||
return (bucket); | return (bucket); | ||||
} | } | ||||
/* | |||||
* Insert a full bucket into the specified cache. The "ws" parameter indicates | |||||
* whether the bucket's contents should be counted as part of the zone's working | |||||
* set. | |||||
*/ | |||||
static void | static void | ||||
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket, | zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket, | ||||
const bool ws) | const bool ws) | ||||
{ | { | ||||
ZONE_LOCK_ASSERT(zone); | ZONE_LOCK_ASSERT(zone); | ||||
KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow", | KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow", | ||||
__func__, zone)); | __func__, zone)); | ||||
LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); | if (ws) | ||||
TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); | |||||
else | |||||
TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link); | |||||
zdom->uzd_nitems += bucket->ub_cnt; | zdom->uzd_nitems += bucket->ub_cnt; | ||||
if (ws && zdom->uzd_imax < zdom->uzd_nitems) | if (ws && zdom->uzd_imax < zdom->uzd_nitems) | ||||
zdom->uzd_imax = zdom->uzd_nitems; | zdom->uzd_imax = zdom->uzd_nitems; | ||||
zone->uz_bkt_count += bucket->ub_cnt; | zone->uz_bkt_count += bucket->ub_cnt; | ||||
} | } | ||||
static void | static void | ||||
zone_log_warning(uma_zone_t zone) | zone_log_warning(uma_zone_t zone) | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
zone_domain_update_wss(uma_zone_domain_t zdom) | zone_domain_update_wss(uma_zone_domain_t zdom) | ||||
{ | { | ||||
long wss; | long wss; | ||||
MPASS(zdom->uzd_imax >= zdom->uzd_imin); | MPASS(zdom->uzd_imax >= zdom->uzd_imin); | ||||
wss = zdom->uzd_imax - zdom->uzd_imin; | wss = zdom->uzd_imax - zdom->uzd_imin; | ||||
zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems; | zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems; | ||||
zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5; | zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5; | ||||
} | } | ||||
/* | /* | ||||
* Routine to perform timeout driven calculations. This expands the | * Routine to perform timeout driven calculations. This expands the | ||||
* hashes and does per cpu statistics aggregation. | * hashes and does per cpu statistics aggregation. | ||||
* | * | ||||
* Returns nothing. | * Returns nothing. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 202 Lines • ▼ Show 20 Lines | cache_drain(uma_zone_t zone) | ||||
/* | /* | ||||
* XXX: It is safe to not lock the per-CPU caches, because we're | * XXX: It is safe to not lock the per-CPU caches, because we're | ||||
* tearing down the zone anyway. I.e., there will be no further use | * tearing down the zone anyway. I.e., there will be no further use | ||||
* of the caches at this point. | * of the caches at this point. | ||||
* | * | ||||
 * XXX: It would be good to be able to assert that the zone is being | 	 * XXX: It would be good to be able to assert that the zone is being | ||||
* torn down to prevent improper use of cache_drain(). | * torn down to prevent improper use of cache_drain(). | ||||
* | * | ||||
* XXX: We lock the zone before passing into bucket_cache_drain() as | * XXX: We lock the zone before passing into bucket_cache_reclaim() as | ||||
* it is used elsewhere. Should the tear-down path be made special | * it is used elsewhere. Should the tear-down path be made special | ||||
* there in some form? | * there in some form? | ||||
*/ | */ | ||||
CPU_FOREACH(cpu) { | CPU_FOREACH(cpu) { | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
bucket_drain(zone, cache->uc_allocbucket); | bucket_drain(zone, cache->uc_allocbucket); | ||||
bucket_drain(zone, cache->uc_freebucket); | bucket_drain(zone, cache->uc_freebucket); | ||||
if (cache->uc_allocbucket != NULL) | if (cache->uc_allocbucket != NULL) | ||||
bucket_free(zone, cache->uc_allocbucket, NULL); | bucket_free(zone, cache->uc_allocbucket, NULL); | ||||
if (cache->uc_freebucket != NULL) | if (cache->uc_freebucket != NULL) | ||||
bucket_free(zone, cache->uc_freebucket, NULL); | bucket_free(zone, cache->uc_freebucket, NULL); | ||||
cache->uc_allocbucket = cache->uc_freebucket = NULL; | cache->uc_allocbucket = cache->uc_freebucket = NULL; | ||||
} | } | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
bucket_cache_drain(zone); | bucket_cache_reclaim(zone, true); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
static void | static void | ||||
cache_shrink(uma_zone_t zone) | cache_shrink(uma_zone_t zone) | ||||
{ | { | ||||
if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | if (zone->uz_flags & UMA_ZFLAG_INTERNAL) | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Safely drain per-CPU caches of a zone(s) to alloc bucket. | * Safely drain per-CPU caches of a zone(s) to alloc bucket. | ||||
* This is an expensive call because it needs to bind to all CPUs | * This is an expensive call because it needs to bind to all CPUs | ||||
* one by one and enter a critical section on each of them in order | * one by one and enter a critical section on each of them in order | ||||
* to safely access their cache buckets. | * to safely access their cache buckets. | ||||
 * The zone lock must not be held when calling this function. | 	 * The zone lock must not be held when calling this function. | ||||
*/ | */ | ||||
static void | static void | ||||
cache_drain_safe(uma_zone_t zone) | pcpu_cache_drain_safe(uma_zone_t zone) | ||||
{ | { | ||||
int cpu; | int cpu; | ||||
/* | /* | ||||
 * Polite bucket sizes shrinking was not enough, shrink aggressively. | 	 * Polite bucket sizes shrinking was not enough, shrink aggressively. | ||||
*/ | */ | ||||
if (zone) | if (zone) | ||||
cache_shrink(zone); | cache_shrink(zone); | ||||
Show All 11 Lines | else | ||||
zone_foreach(cache_drain_safe_cpu); | zone_foreach(cache_drain_safe_cpu); | ||||
} | } | ||||
thread_lock(curthread); | thread_lock(curthread); | ||||
sched_unbind(curthread); | sched_unbind(curthread); | ||||
thread_unlock(curthread); | thread_unlock(curthread); | ||||
} | } | ||||
/* | /* | ||||
* Drain the cached buckets from a zone. Expects a locked zone on entry. | * Reclaim cached buckets from a zone. All buckets are reclaimed if the caller | ||||
 * requested a drain, otherwise the per-domain caches are trimmed to the | ||||
* estimated working set size. | |||||
*/ | */ | ||||
static void | static void | ||||
bucket_cache_drain(uma_zone_t zone) | bucket_cache_reclaim(uma_zone_t zone, bool drain) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
long target, tofree; | |||||
int i; | int i; | ||||
/* | |||||
* Drain the bucket queues and free the buckets. | |||||
*/ | |||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
zdom = &zone->uz_domain[i]; | zdom = &zone->uz_domain[i]; | ||||
while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) != | |||||
NULL) { | /* | ||||
* If we were asked to drain the zone, we are done only once | |||||
* this bucket cache is empty. Otherwise, we reclaim items in | |||||
* excess of the zone's estimated working set size. If the | |||||
* difference nitems - imin is larger than the WSS estimate, | |||||
* then the estimate will grow at the end of this interval and | |||||
* we ignore the historical average. | |||||
*/ | |||||
target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems - | |||||
zdom->uzd_imin); | |||||
while (zdom->uzd_nitems > target) { | |||||
bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist); | |||||
if (bucket == NULL) | |||||
break; | |||||
tofree = bucket->ub_cnt; | |||||
TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link); | |||||
zdom->uzd_nitems -= tofree; | |||||
/* | |||||
* Shift the bounds of the current WSS interval to avoid | |||||
* perturbing the estimate. | |||||
*/ | |||||
zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree); | |||||
zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree); | |||||
jeff: I missed the review for the working set-size. Where is that? | |||||
Not Done Inline ActionsD16666, just for the record, but you already commented on it. markj: D16666, just for the record, but you already commented on it. | |||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
bucket_drain(zone, bucket); | bucket_drain(zone, bucket); | ||||
bucket_free(zone, bucket, NULL); | bucket_free(zone, bucket, NULL); | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Shrink further bucket sizes. Price of single zone lock collision | * Shrink the zone bucket size to ensure that the per-CPU caches | ||||
* is probably lower then price of global cache drain. | * don't grow too large. | ||||
*/ | */ | ||||
if (zone->uz_count > zone->uz_count_min) | if (zone->uz_count > zone->uz_count_min) | ||||
zone->uz_count--; | zone->uz_count--; | ||||
} | } | ||||
static void | static void | ||||
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) | keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | finished: | ||||
while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { | while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { | ||||
SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); | SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); | ||||
keg_free_slab(keg, slab, keg->uk_ipers); | keg_free_slab(keg, slab, keg->uk_ipers); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
zone_drain_wait(uma_zone_t zone, int waitok) | zone_reclaim(uma_zone_t zone, int waitok, bool drain) | ||||
{ | { | ||||
/* | /* | ||||
* Set draining to interlock with zone_dtor() so we can release our | * Set draining to interlock with zone_dtor() so we can release our | ||||
* locks as we go. Only dtor() should do a WAITOK call since it | * locks as we go. Only dtor() should do a WAITOK call since it | ||||
* is the only call that knows the structure will still be available | * is the only call that knows the structure will still be available | ||||
* when it wakes up. | * when it wakes up. | ||||
*/ | */ | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
while (zone->uz_flags & UMA_ZFLAG_DRAINING) { | while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) { | ||||
if (waitok == M_NOWAIT) | if (waitok == M_NOWAIT) | ||||
goto out; | goto out; | ||||
msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); | msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); | ||||
} | } | ||||
zone->uz_flags |= UMA_ZFLAG_DRAINING; | zone->uz_flags |= UMA_ZFLAG_RECLAIMING; | ||||
bucket_cache_drain(zone); | bucket_cache_reclaim(zone, drain); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
/* | /* | ||||
* The DRAINING flag protects us from being freed while | * The DRAINING flag protects us from being freed while | ||||
* we're running. Normally the uma_rwlock would protect us but we | * we're running. Normally the uma_rwlock would protect us but we | ||||
* must be able to release and acquire the right lock for each keg. | * must be able to release and acquire the right lock for each keg. | ||||
*/ | */ | ||||
keg_drain(zone->uz_keg); | keg_drain(zone->uz_keg); | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
zone->uz_flags &= ~UMA_ZFLAG_DRAINING; | zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING; | ||||
wakeup(zone); | wakeup(zone); | ||||
out: | out: | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
void | static void | ||||
zone_drain(uma_zone_t zone) | zone_drain(uma_zone_t zone) | ||||
{ | { | ||||
zone_drain_wait(zone, M_NOWAIT); | zone_reclaim(zone, M_NOWAIT, true); | ||||
} | } | ||||
static void | |||||
zone_trim(uma_zone_t zone) | |||||
{ | |||||
zone_reclaim(zone, M_NOWAIT, false); | |||||
} | |||||
/* | /* | ||||
* Allocate a new slab for a keg. This does not insert the slab onto a list. | * Allocate a new slab for a keg. This does not insert the slab onto a list. | ||||
* If the allocation was successful, the keg lock will be held upon return, | * If the allocation was successful, the keg lock will be held upon return, | ||||
* otherwise the keg will be left unlocked. | * otherwise the keg will be left unlocked. | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* flags Wait flags for the item initialization routine | * flags Wait flags for the item initialization routine | ||||
* aflags Wait flags for the slab allocation | * aflags Wait flags for the slab allocation | ||||
▲ Show 20 Lines • Show All 682 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
static int | static int | ||||
zone_ctor(void *mem, int size, void *udata, int flags) | zone_ctor(void *mem, int size, void *udata, int flags) | ||||
{ | { | ||||
struct uma_zctor_args *arg = udata; | struct uma_zctor_args *arg = udata; | ||||
uma_zone_t zone = mem; | uma_zone_t zone = mem; | ||||
uma_zone_t z; | uma_zone_t z; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
int i; | |||||
bzero(zone, size); | bzero(zone, size); | ||||
zone->uz_name = arg->name; | zone->uz_name = arg->name; | ||||
zone->uz_ctor = arg->ctor; | zone->uz_ctor = arg->ctor; | ||||
zone->uz_dtor = arg->dtor; | zone->uz_dtor = arg->dtor; | ||||
zone->uz_init = NULL; | zone->uz_init = NULL; | ||||
zone->uz_fini = NULL; | zone->uz_fini = NULL; | ||||
zone->uz_sleeps = 0; | zone->uz_sleeps = 0; | ||||
Show All 10 Lines | zone_ctor(void *mem, int size, void *udata, int flags) | ||||
if (__predict_true(booted == BOOT_RUNNING)) | if (__predict_true(booted == BOOT_RUNNING)) | ||||
zone_alloc_counters(zone); | zone_alloc_counters(zone); | ||||
else { | else { | ||||
zone->uz_allocs = EARLY_COUNTER; | zone->uz_allocs = EARLY_COUNTER; | ||||
zone->uz_frees = EARLY_COUNTER; | zone->uz_frees = EARLY_COUNTER; | ||||
zone->uz_fails = EARLY_COUNTER; | zone->uz_fails = EARLY_COUNTER; | ||||
} | } | ||||
for (i = 0; i < vm_ndomains; i++) | |||||
TAILQ_INIT(&zone->uz_domain[i].uzd_buckets); | |||||
/* | /* | ||||
* This is a pure cache zone, no kegs. | * This is a pure cache zone, no kegs. | ||||
*/ | */ | ||||
if (arg->import) { | if (arg->import) { | ||||
if (arg->flags & UMA_ZONE_VM) | if (arg->flags & UMA_ZONE_VM) | ||||
arg->flags |= UMA_ZFLAG_CACHEONLY; | arg->flags |= UMA_ZFLAG_CACHEONLY; | ||||
zone->uz_flags = arg->flags; | zone->uz_flags = arg->flags; | ||||
zone->uz_size = arg->size; | zone->uz_size = arg->size; | ||||
▲ Show 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | zone_dtor(void *arg, int size, void *udata) | ||||
LIST_REMOVE(zone, uz_link); | LIST_REMOVE(zone, uz_link); | ||||
rw_wunlock(&uma_rwlock); | rw_wunlock(&uma_rwlock); | ||||
/* | /* | ||||
* XXX there are some races here where | * XXX there are some races here where | ||||
* the zone can be drained but zone lock | * the zone can be drained but zone lock | ||||
* released and then refilled before we | * released and then refilled before we | ||||
* remove it... we dont care for now | * remove it... we dont care for now | ||||
*/ | */ | ||||
zone_drain_wait(zone, M_WAITOK); | zone_reclaim(zone, M_WAITOK, true); | ||||
/* | /* | ||||
* We only destroy kegs from non secondary/non cache zones. | * We only destroy kegs from non secondary/non cache zones. | ||||
*/ | */ | ||||
if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { | if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { | ||||
keg = zone->uz_keg; | keg = zone->uz_keg; | ||||
rw_wlock(&uma_rwlock); | rw_wlock(&uma_rwlock); | ||||
LIST_REMOVE(keg, uk_link); | LIST_REMOVE(keg, uk_link); | ||||
rw_wunlock(&uma_rwlock); | rw_wunlock(&uma_rwlock); | ||||
▲ Show 20 Lines • Show All 188 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
uma_startup2(void) | uma_startup2(void) | ||||
{ | { | ||||
#ifdef DIAGNOSTIC | #ifdef DIAGNOSTIC | ||||
printf("Entering %s with %d boot pages left\n", __func__, boot_pages); | printf("Entering %s with %d boot pages left\n", __func__, boot_pages); | ||||
#endif | #endif | ||||
booted = BOOT_BUCKETS; | booted = BOOT_BUCKETS; | ||||
sx_init(&uma_drain_lock, "umadrain"); | sx_init(&uma_reclaim_lock, "umareclaim"); | ||||
bucket_enable(); | bucket_enable(); | ||||
} | } | ||||
/* | /* | ||||
* Initialize our callout handle | * Initialize our callout handle | ||||
* | * | ||||
*/ | */ | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
args.align = align; | args.align = align; | ||||
args.flags = flags; | args.flags = flags; | ||||
args.keg = NULL; | args.keg = NULL; | ||||
if (booted < BOOT_BUCKETS) { | if (booted < BOOT_BUCKETS) { | ||||
locked = false; | locked = false; | ||||
} else { | } else { | ||||
sx_slock(&uma_drain_lock); | sx_slock(&uma_reclaim_lock); | ||||
locked = true; | locked = true; | ||||
} | } | ||||
res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); | res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); | ||||
if (locked) | if (locked) | ||||
sx_sunlock(&uma_drain_lock); | sx_sunlock(&uma_reclaim_lock); | ||||
return (res); | return (res); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
uma_zone_t | uma_zone_t | ||||
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, | uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, | ||||
uma_init zinit, uma_fini zfini, uma_zone_t master) | uma_init zinit, uma_fini zfini, uma_zone_t master) | ||||
{ | { | ||||
Show All 12 Lines | uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, | ||||
args.fini = zfini; | args.fini = zfini; | ||||
args.align = keg->uk_align; | args.align = keg->uk_align; | ||||
args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; | args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; | ||||
args.keg = keg; | args.keg = keg; | ||||
if (booted < BOOT_BUCKETS) { | if (booted < BOOT_BUCKETS) { | ||||
locked = false; | locked = false; | ||||
} else { | } else { | ||||
sx_slock(&uma_drain_lock); | sx_slock(&uma_reclaim_lock); | ||||
locked = true; | locked = true; | ||||
} | } | ||||
/* XXX Attaches only one keg of potentially many. */ | /* XXX Attaches only one keg of potentially many. */ | ||||
res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); | res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); | ||||
if (locked) | if (locked) | ||||
sx_sunlock(&uma_drain_lock); | sx_sunlock(&uma_reclaim_lock); | ||||
return (res); | return (res); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
uma_zone_t | uma_zone_t | ||||
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, | uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, | ||||
uma_init zinit, uma_fini zfini, uma_import zimport, | uma_init zinit, uma_fini zfini, uma_import zimport, | ||||
uma_release zrelease, void *arg, int flags) | uma_release zrelease, void *arg, int flags) | ||||
Show All 16 Lines | uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, | ||||
return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); | return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zdestroy(uma_zone_t zone) | uma_zdestroy(uma_zone_t zone) | ||||
{ | { | ||||
sx_slock(&uma_drain_lock); | sx_slock(&uma_reclaim_lock); | ||||
zone_free_item(zones, zone, NULL, SKIP_NONE); | zone_free_item(zones, zone, NULL, SKIP_NONE); | ||||
sx_sunlock(&uma_drain_lock); | sx_sunlock(&uma_reclaim_lock); | ||||
} | } | ||||
void | void | ||||
uma_zwait(uma_zone_t zone) | uma_zwait(uma_zone_t zone) | ||||
{ | { | ||||
void *item; | void *item; | ||||
item = uma_zalloc_arg(zone, NULL, M_WAITOK); | item = uma_zalloc_arg(zone, NULL, M_WAITOK); | ||||
▲ Show 20 Lines • Show All 195 Lines • ▼ Show 20 Lines | #endif | ||||
/* | /* | ||||
* Check the zone's cache of buckets. | * Check the zone's cache of buckets. | ||||
*/ | */ | ||||
if (domain == UMA_ANYDOMAIN) | if (domain == UMA_ANYDOMAIN) | ||||
zdom = &zone->uz_domain[0]; | zdom = &zone->uz_domain[0]; | ||||
else | else | ||||
zdom = &zone->uz_domain[domain]; | zdom = &zone->uz_domain[domain]; | ||||
if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) { | if ((bucket = zone_try_fetch_bucket(zone, zdom)) != NULL) { | ||||
KASSERT(bucket->ub_cnt != 0, | KASSERT(bucket->ub_cnt != 0, | ||||
("uma_zalloc_arg: Returning an empty bucket.")); | ("uma_zalloc_arg: Returning an empty bucket.")); | ||||
cache->uc_allocbucket = bucket; | cache->uc_allocbucket = bucket; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
goto zalloc_start; | goto zalloc_start; | ||||
} | } | ||||
/* We are no longer associated with this CPU. */ | /* We are no longer associated with this CPU. */ | ||||
critical_exit(); | critical_exit(); | ||||
▲ Show 20 Lines • Show All 1,069 Lines • ▼ Show 20 Lines | for (;;) { | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
static void | void | ||||
uma_reclaim_locked(bool kmem_danger) | uma_reclaim(int req) | ||||
{ | { | ||||
CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); | CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); | ||||
sx_assert(&uma_drain_lock, SA_XLOCKED); | sx_xlock(&uma_reclaim_lock); | ||||
bucket_enable(); | bucket_enable(); | ||||
switch (req) { | |||||
case UMA_RECLAIM_TRIM: | |||||
zone_foreach(zone_trim); | |||||
break; | |||||
case UMA_RECLAIM_DRAIN: | |||||
case UMA_RECLAIM_DRAIN_CPU: | |||||
zone_foreach(zone_drain); | zone_foreach(zone_drain); | ||||
if (vm_page_count_min() || kmem_danger) { | if (req == UMA_RECLAIM_DRAIN_CPU) { | ||||
cache_drain_safe(NULL); | pcpu_cache_drain_safe(NULL); | ||||
zone_foreach(zone_drain); | zone_foreach(zone_drain); | ||||
} | } | ||||
break; | |||||
default: | |||||
panic("unhandled reclamation request %d", req); | |||||
} | |||||
/* | /* | ||||
 * Some slabs may have been freed but this zone will be visited early, | 	 * Some slabs may have been freed but this zone will be visited early, | ||||
 * so visit it again to free pages that are empty once other | 	 * so visit it again to free pages that are empty once other | ||||
* zones are drained. We have to do the same for buckets. | * zones are drained. We have to do the same for buckets. | ||||
*/ | */ | ||||
zone_drain(slabzone); | zone_drain(slabzone); | ||||
bucket_zone_drain(); | bucket_zone_drain(); | ||||
sx_xunlock(&uma_reclaim_lock); | |||||
} | } | ||||
void | |||||
uma_reclaim(void) | |||||
{ | |||||
sx_xlock(&uma_drain_lock); | |||||
uma_reclaim_locked(false); | |||||
sx_xunlock(&uma_drain_lock); | |||||
} | |||||
static volatile int uma_reclaim_needed; | static volatile int uma_reclaim_needed; | ||||
void | void | ||||
uma_reclaim_wakeup(void) | uma_reclaim_wakeup(void) | ||||
{ | { | ||||
if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) | if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) | ||||
wakeup(uma_reclaim); | wakeup(uma_reclaim); | ||||
} | } | ||||
void | void | ||||
uma_reclaim_worker(void *arg __unused) | uma_reclaim_worker(void *arg __unused) | ||||
{ | { | ||||
for (;;) { | for (;;) { | ||||
sx_xlock(&uma_drain_lock); | sx_xlock(&uma_reclaim_lock); | ||||
while (atomic_load_int(&uma_reclaim_needed) == 0) | while (atomic_load_int(&uma_reclaim_needed) == 0) | ||||
sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", | sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl", | ||||
hz); | hz); | ||||
sx_xunlock(&uma_drain_lock); | sx_xunlock(&uma_reclaim_lock); | ||||
EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); | EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); | ||||
sx_xlock(&uma_drain_lock); | uma_reclaim(UMA_RECLAIM_DRAIN_CPU); | ||||
uma_reclaim_locked(true); | |||||
atomic_store_int(&uma_reclaim_needed, 0); | atomic_store_int(&uma_reclaim_needed, 0); | ||||
sx_xunlock(&uma_drain_lock); | |||||
/* Don't fire more than once per-second. */ | /* Don't fire more than once per-second. */ | ||||
pause("umarclslp", hz); | pause("umarclslp", hz); | ||||
} | |||||
} | |||||
/* See uma.h */ | |||||
void | |||||
uma_zreclaim(uma_zone_t zone, int req) | |||||
{ | |||||
switch (req) { | |||||
case UMA_RECLAIM_TRIM: | |||||
zone_trim(zone); | |||||
break; | |||||
case UMA_RECLAIM_DRAIN: | |||||
zone_drain(zone); | |||||
break; | |||||
case UMA_RECLAIM_DRAIN_CPU: | |||||
pcpu_cache_drain_safe(zone); | |||||
zone_drain(zone); | |||||
break; | |||||
default: | |||||
panic("unhandled reclamation request %d", req); | |||||
} | } | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_exhausted(uma_zone_t zone) | uma_zone_exhausted(uma_zone_t zone) | ||||
{ | { | ||||
int full; | int full; | ||||
▲ Show 20 Lines • Show All 577 Lines • Show Last 20 Lines |
I missed the review for the working set-size. Where is that?