sys/vm/uma_core.c
[255 lines elided]
 static void uma_startup3(void);
 static void *zone_alloc_item(uma_zone_t, void *, int, int);
 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
 static void bucket_enable(void);
 static void bucket_init(void);
 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
 static void bucket_zone_drain(void);
-static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
-static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
-static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
+static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
     uma_fini fini, int align, uint32_t flags);
 static int zone_import(uma_zone_t, void **, int, int, int);
 static void zone_release(uma_zone_t, void **, int);
 static void uma_zero_item(void *, uma_zone_t);
 void uma_print_zone(uma_zone_t);
 void uma_print_stats(void);
[190 lines elided; context: zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)]
 	ZONE_LOCK_ASSERT(zone);
 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
 		LIST_REMOVE(bucket, ub_link);
 		zdom->uzd_nitems -= bucket->ub_cnt;
 		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
 			zdom->uzd_imin = zdom->uzd_nitems;
+		zone->uz_bktcount -= bucket->ub_cnt;
 	}
 	return (bucket);
 }
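The uzd_imin/uzd_imax bookkeeping above (and its mirror image in zone_put_bucket below) feeds the per-domain working-set estimate that zone_timeout() refreshes. zone_domain_update_wss() is outside this hunk; the following is only a sketch of the general shape of that computation, assuming a smoothed imax-imin window, not the code under review:

static void
zone_domain_update_wss(uma_zone_domain_t zdom)
{
    long wss;

    /* The swing observed since the last timeout tick. */
    MPASS(zdom->uzd_imax >= zdom->uzd_imin);
    wss = zdom->uzd_imax - zdom->uzd_imin;
    /* Restart the window from the current item count. */
    zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
    /* Smooth the estimate to damp transient spikes (assumed weighting). */
    zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
}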
 static void
 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
     const bool ws)
 {
 	ZONE_LOCK_ASSERT(zone);
 	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
 	zdom->uzd_nitems += bucket->ub_cnt;
 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
 		zdom->uzd_imax = zdom->uzd_nitems;
+	KASSERT(zone->uz_bktcount < zone->uz_bktmax, ("%s: zone %p overflow",
+	    __func__, zone));
+	zone->uz_bktcount += bucket->ub_cnt;
 }

markj: This assertion should go at the beginning of the function.
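A minimal sketch of the reordering markj asks for, with the overflow assertion hoisted so it fires before the bucket is linked into the domain list (an illustration, not the committed code):

static void
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
    const bool ws)
{
    ZONE_LOCK_ASSERT(zone);
    /* Check the cache limit before any zone or domain state is touched. */
    KASSERT(zone->uz_bktcount < zone->uz_bktmax,
        ("%s: zone %p overflow", __func__, zone));
    LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
    zdom->uzd_nitems += bucket->ub_cnt;
    if (ws && zdom->uzd_imax < zdom->uzd_nitems)
        zdom->uzd_imax = zdom->uzd_nitems;
    zone->uz_bktcount += bucket->ub_cnt;
}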
 static void
 zone_log_warning(uma_zone_t zone)
 {
 	static const struct timeval warninterval = { 300, 0 };

 	if (!zone_warnings || zone->uz_warning == NULL)
 		return;

 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
 }

 static inline void
 zone_maxaction(uma_zone_t zone)
 {
 	if (zone->uz_maxaction.ta_func != NULL)
 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
 }
-static void
-zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
-{
-	uma_klink_t klink;
-
-	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
-		kegfn(klink->kl_keg);
-}
 /*
  * Routine called by timeout which is used to fire off some time interval
  * based calculations.  (stats, hash size, etc.)
  *
  * Arguments:
  *	arg   Unused
  *
  * Returns:

[28 lines elided]
 /*
  * Routine to perform timeout driven calculations.  This expands the
  * hashes and does per cpu statistics aggregation.
  *
  * Returns nothing.
  */
 static void
-keg_timeout(uma_keg_t keg)
+zone_timeout(uma_zone_t zone)
 {
+	uma_keg_t keg = zone->uz_keg;

 	KEG_LOCK(keg);
 	/*
 	 * Expand the keg hash table.
 	 *
 	 * This is done if the number of slabs is larger than the hash size.
 	 * What I'm trying to do here is completely reduce collisions.  This
 	 * may be a little aggressive.  Should I allow for two collisions max?

[21 lines elided; context: if (ret) {]

 		} else
 			oldhash = newhash;

 			KEG_UNLOCK(keg);
 			hash_free(&oldhash);
 			return;
 		}
 	}
-	KEG_UNLOCK(keg);
-}
-
-static void
-zone_timeout(uma_zone_t zone)
-{
-	int i;
-
-	zone_foreach_keg(zone, &keg_timeout);
-	ZONE_LOCK(zone);
-	for (i = 0; i < vm_ndomains; i++)
+	for (int i = 0; i < vm_ndomains; i++)
 		zone_domain_update_wss(&zone->uz_domain[i]);

markj: We want the zone lock here, not the keg lock.

glebius: In the new scheme if zone has a keg, they share lock. The zone_timeout() is called for only keg-enabled zones, see zone_foreach(). So, KEG_LOCK() here is correct and is self-documenting.

-	ZONE_UNLOCK(zone);
+	KEG_UNLOCK(keg);
 }
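glebius's argument rests on lock-pointer aliasing: for a keg-backed zone, uz_lockptr points at the keg's mutex, so ZONE_LOCK() and KEG_LOCK() acquire the same lock. Roughly, per the macros in uma_int.h and the assignment made in zone_ctor() later in this diff:

/* zone_ctor() points a keg-backed zone's lock at the keg lock ... */
zone->uz_lockptr = &keg->uk_lock;

/* ... so for such zones these two acquire the same mutex. */
#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)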
 /*
  * Allocate and zero fill the next sized hash table from the appropriate
  * backing store.
  *
  * Arguments:
  *	hash  A new hash structure with the old hash size in uh_hashsize
  *

[113 lines elided; context: bucket_drain(uma_zone_t zone, uma_bucket_t bucket)]
 	if (bucket == NULL)
 		return;

 	if (zone->uz_fini)
 		for (i = 0; i < bucket->ub_cnt; i++)
 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
+	ZONE_LOCK(zone);
+	zone->uz_frees++;
+	zone->uz_items -= bucket->ub_cnt;
+	if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems)
+		wakeup_one(zone);
+	ZONE_UNLOCK(zone);
 	bucket->ub_cnt = 0;
 }

markj: Shouldn't we be adding bucket->ub_cnt to uz_frees?
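A sketch of the fix markj's question implies: draining a bucket releases ub_cnt items, so the free counter should advance by that count rather than by one (hypothetical correction, not part of this diff):

    ZONE_LOCK(zone);
    zone->uz_frees += bucket->ub_cnt;	/* one free per released item */
    zone->uz_items -= bucket->ub_cnt;
    if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems)
        wakeup_one(zone);
    ZONE_UNLOCK(zone);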
 /*
  * Drains the per cpu caches for a zone.
  *
  * NOTE: This may only be called while the zone is being torn down, and not
  * during normal operation.  This is necessary in order that we do not have

[269 lines elided; context: zone_drain_wait(uma_zone_t zone, int waitok)]
 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
 	bucket_cache_drain(zone);
 	ZONE_UNLOCK(zone);
 	/*
 	 * The DRAINING flag protects us from being freed while
 	 * we're running.  Normally the uma_rwlock would protect us but we
 	 * must be able to release and acquire the right lock for each keg.
 	 */
-	zone_foreach_keg(zone, &keg_drain);
+	keg_drain(zone->uz_keg);
 	ZONE_LOCK(zone);
 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
 	wakeup(zone);
 out:
 	ZONE_UNLOCK(zone);
 }

 void

[23 lines elided; context: keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)]
 	unsigned long size;
 	uint8_t *mem;
 	uint8_t flags;
 	int i;

 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("keg_alloc_slab: domain %d out of range", domain));
 	mtx_assert(&keg->uk_lock, MA_OWNED);
+	MPASS(zone->uz_lockptr == &keg->uk_lock);

 	allocf = keg->uk_allocf;
 	KEG_UNLOCK(keg);

 	slab = NULL;
 	mem = NULL;
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);

[79 lines elided]
 static void *
 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
     int wait)
 {
 	uma_keg_t keg;
 	void *mem;
 	int pages;

-	keg = zone_first_keg(zone);
+	keg = zone->uz_keg;

 	/*
 	 * If we are in BOOT_BUCKETS or higher, then switch to real
 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
 	 */
 	switch (booted) {
 	case BOOT_COLD:
 	case BOOT_STRAPPED:
 		break;

[121 lines elided]
 {
 	TAILQ_HEAD(, vm_page) alloctail;
 	u_long npages;
 	vm_offset_t retkva, zkva;
 	vm_page_t p, p_next;
 	uma_keg_t keg;

 	TAILQ_INIT(&alloctail);
-	keg = zone_first_keg(zone);
+	keg = zone->uz_keg;

 	npages = howmany(bytes, PAGE_SIZE);
 	while (npages > 0) {
 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
 		    VM_ALLOC_NOWAIT));
 		if (p != NULL) {

[206 lines elided]
  *	Nothing
  */
 static void
 keg_large_init(uma_keg_t keg)
 {
 	u_int shsize;

 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
-	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
-	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
 	keg->uk_ipers = 1;
 	keg->uk_rsize = keg->uk_size;

 	/* Check whether we have enough space to not do OFFPAGE. */

[222 lines elided; context: zone_ctor(void *mem, int size, void *udata, int flags)]
 	zone->uz_init = NULL;
 	zone->uz_fini = NULL;
 	zone->uz_allocs = 0;
 	zone->uz_frees = 0;
 	zone->uz_fails = 0;
 	zone->uz_sleeps = 0;
 	zone->uz_count = 0;
 	zone->uz_count_min = 0;
+	zone->uz_count_max = BUCKET_MAX;
 	zone->uz_flags = 0;
 	zone->uz_warning = NULL;
 	/* The domain structures follow the cpu structures. */
 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
+	zone->uz_bktmax = ULONG_MAX;
 	timevalclear(&zone->uz_ratecheck);

+	keg = arg->keg;
+	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

 	/*
 	 * This is a pure cache zone, no kegs.
 	 */
 	if (arg->import) {
 		if (arg->flags & UMA_ZONE_VM)
 			arg->flags |= UMA_ZFLAG_CACHEONLY;
 		zone->uz_flags = arg->flags;
 		zone->uz_size = arg->size;
 		zone->uz_import = arg->import;
 		zone->uz_release = arg->release;
 		zone->uz_arg = arg->arg;
 		zone->uz_lockptr = &zone->uz_lock;
-		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
 		rw_wlock(&uma_rwlock);
 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
 		rw_wunlock(&uma_rwlock);
 		goto out;
 	}
 	/*
 	 * Use the regular zone/keg/slab allocator.
 	 */
 	zone->uz_import = (uma_import)zone_import;
 	zone->uz_release = (uma_release)zone_release;
 	zone->uz_arg = zone;
-	keg = arg->keg;

 	if (arg->flags & UMA_ZONE_SECONDARY) {
 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 		zone->uz_init = arg->uminit;
 		zone->uz_fini = arg->fini;
 		zone->uz_lockptr = &keg->uk_lock;
 		zone->uz_flags |= UMA_ZONE_SECONDARY;
 		rw_wlock(&uma_rwlock);

[22 lines elided; context: if (arg->flags & UMA_ZONE_SECONDARY) {]

 		karg.flags = arg->flags;
 		karg.zone = zone;
 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 		    flags);
 		if (error)
 			return (error);
 	}

-	/*
-	 * Link in the first keg.
-	 */
-	zone->uz_klink.kl_keg = keg;
-	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
-	zone->uz_lockptr = &keg->uk_lock;
+	zone->uz_keg = keg;
 	zone->uz_size = keg->uk_size;
 	zone->uz_flags |= (keg->uk_flags &
 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

 	/*
 	 * Some internal zones don't have room allocated for the per cpu
 	 * caches.  If we're internal, bail out here.
 	 */

[49 lines elided]
  * Zone header dtor.
  *
  * Arguments/Returns follow uma_dtor specifications
  *	udata  unused
  */
 static void
 zone_dtor(void *arg, int size, void *udata)
 {
-	uma_klink_t klink;
 	uma_zone_t zone;
 	uma_keg_t keg;

 	zone = (uma_zone_t)arg;
-	keg = zone_first_keg(zone);

 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 		cache_drain(zone);

 	rw_wlock(&uma_rwlock);
 	LIST_REMOVE(zone, uz_link);
 	rw_wunlock(&uma_rwlock);
 	/*
 	 * XXX there are some races here where
 	 * the zone can be drained but zone lock
 	 * released and then refilled before we
 	 * remove it...  we dont care for now
 	 */
 	zone_drain_wait(zone, M_WAITOK);
 	/*
-	 * Unlink all of our kegs.
-	 */
-	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
-		klink->kl_keg = NULL;
-		LIST_REMOVE(klink, kl_link);
-		if (klink == &zone->uz_klink)
-			continue;
-		free(klink, M_TEMP);
-	}
-	/*
 	 * We only destroy kegs from non secondary zones.
 	 */
-	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
+	if ((keg = zone->uz_keg) != NULL &&
+	    (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
 		rw_wlock(&uma_rwlock);
 		LIST_REMOVE(keg, uk_link);
 		rw_wunlock(&uma_rwlock);
 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
 	}
-	if (zone->uz_lockptr == &zone->uz_lock)
 	ZONE_LOCK_FINI(zone);
 }
 /*
  * Traverses every zone in the system and calls a callback
  *
  * Arguments:
  *	zfunc  A pointer to a function which accepts a zone
  *	       as an argument.
  *

[267 lines elided]
 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
     uma_init zinit, uma_fini zfini, uma_zone_t master)
 {
 	struct uma_zctor_args args;
 	uma_keg_t keg;
 	uma_zone_t res;
 	bool locked;

-	keg = zone_first_keg(master);
+	keg = master->uz_keg;
 	memset(&args, 0, sizeof(args));
 	args.name = name;
 	args.size = keg->uk_size;
 	args.ctor = ctor;
 	args.dtor = dtor;
 	args.uminit = zinit;
 	args.fini = zfini;
 	args.align = keg->uk_align;

[32 lines elided; context: uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, ...)]
 	args.release = zrelease;
 	args.arg = arg;
 	args.align = 0;
 	args.flags = flags;

 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
 }
-static void
-zone_lock_pair(uma_zone_t a, uma_zone_t b)
-{
-	if (a < b) {
-		ZONE_LOCK(a);
-		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
-	} else {
-		ZONE_LOCK(b);
-		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
-	}
-}
-
-static void
-zone_unlock_pair(uma_zone_t a, uma_zone_t b)
-{
-	ZONE_UNLOCK(a);
-	ZONE_UNLOCK(b);
-}
-
-int
-uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
-{
-	uma_klink_t klink;
-	uma_klink_t kl;
-	int error;
-
-	error = 0;
-	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
-	zone_lock_pair(zone, master);
-	/*
-	 * zone must use vtoslab() to resolve objects and must already be
-	 * a secondary.
-	 */
-	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
-	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
-		error = EINVAL;
-		goto out;
-	}
-	/*
-	 * The new master must also use vtoslab().
-	 */
-	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
-		error = EINVAL;
-		goto out;
-	}
-
-	/*
-	 * The underlying object must be the same size.  rsize
-	 * may be different.
-	 */
-	if (master->uz_size != zone->uz_size) {
-		error = E2BIG;
-		goto out;
-	}
-	/*
-	 * Put it at the end of the list.
-	 */
-	klink->kl_keg = zone_first_keg(master);
-	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
-		if (LIST_NEXT(kl, kl_link) == NULL) {
-			LIST_INSERT_AFTER(kl, klink, kl_link);
-			break;
-		}
-	}
-	klink = NULL;
-	zone->uz_flags |= UMA_ZFLAG_MULTI;
-	zone->uz_slab = zone_fetch_slab_multi;
-
-out:
-	zone_unlock_pair(zone, master);
-	if (klink != NULL)
-		free(klink, M_TEMP);
-
-	return (error);
-}
 /* See uma.h */
 void
 uma_zdestroy(uma_zone_t zone)
 {
 	sx_slock(&uma_drain_lock);
 	zone_free_item(zones, zone, NULL, SKIP_NONE);
 	sx_sunlock(&uma_drain_lock);

[45 lines elided]
 /* See uma.h */
 void *
 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 {
 	uma_zone_domain_t zdom;
 	uma_bucket_t bucket;
 	uma_cache_t cache;
 	void *item;
-	int cpu, domain, lockfail;
+	int cpu, domain, lockfail, max;
 #ifdef INVARIANTS
 	bool skipdbg;
 #endif

 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

 	/* This is the fast path allocation */

[153 lines elided]
 	}
 	/* We are no longer associated with this CPU. */
 	critical_exit();

 	/*
 	 * We bump the uz count when the cache size is insufficient to
 	 * handle the working set.
 	 */
-	if (lockfail && zone->uz_count < BUCKET_MAX)
+	if (lockfail && zone->uz_count < zone->uz_count_max)
 		zone->uz_count++;
+	/*
+	 * Short-circuit if we can't allocate more buckets.
+	 */
+	if (zone->uz_bktcount >= zone->uz_bktmax) {
 		ZONE_UNLOCK(zone);
+		goto zalloc_item;
+	}

markj: Why do we duplicate the logic from zone_alloc_item() instead of simply jumping to…

markj: I don't understand this check. In the common case the bucket will be placed in the per-CPU cache, so it won't count against uz_bktcount. And we already check this condition again before adding it to the bucket list.

glebius: You are absolutely correct. This came from Netflix tree and later my profiling showed that condition never happens.

+	if (zone->uz_maxitems) {

markj: I prefer an explicit comparison: if (zone->uz_maxitems > 0)

+		if (zone->uz_items >= zone->uz_maxitems) {
+			zone_log_warning(zone);
+			zone_maxaction(zone);
+			if (flags & M_NOWAIT) {
+				ZONE_UNLOCK(zone);
+				return (NULL);
+			}
+			zone->uz_sleeps++;
+			zone->uz_sleepers++;
+			msleep(zone, zone->uz_lockptr, PVM, "zonelimit", 0);

markj: msleep(9) is deprecated by mtx_sleep(9).

+			zone->uz_sleepers--;
+			if (zone->uz_items >= zone->uz_maxitems) {
+				ZONE_UNLOCK(zone);
+				goto zalloc_restart;
+			}
+		}
+		max = MIN(zone->uz_count, zone->uz_maxitems - zone->uz_items);
+	} else
+		max = zone->uz_count;
+	zone->uz_items += max;
+	if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems)

markj: Explicit comparison: zone->uz_sleepers > 0.

+		wakeup_one(zone);
+	ZONE_UNLOCK(zone);
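On markj's msleep(9) note: mtx_sleep(9) is the documented spelling for sleeping on a channel protected by a mutex and takes the same arguments here, so the substitution would be mechanical (a sketch):

    zone->uz_sleeps++;
    zone->uz_sleepers++;
    /* Same semantics as msleep() on a mutex: the zone lock is
     * dropped for the sleep and reacquired before returning. */
    mtx_sleep(zone, zone->uz_lockptr, PVM, "zonelimit", 0);
    zone->uz_sleepers--;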
 	/*
 	 * Now lets just fill a bucket and put it on the free list.  If that
 	 * works we'll restart the allocation from the beginning and it
 	 * will use the just filled bucket.
 	 */
-	bucket = zone_alloc_bucket(zone, udata, domain, flags);
+	/* Don't wait for buckets, preserve caller's NOVM setting. */

markj: Why did this get inlined? I can't see the reason.

glebius: I'm preparing a patch to put it back into a separate function, and to be fair, I feel that it is more difficult to understand the code when a middle of uma_zalloc_arg() is separated from the rest of the function. If you really insist I can roll this back. However, zone_alloc_bucket() would be modified. You see, the inlined code doesn't exactly match the code of the function. It has a few additions to roll back uz_items in case of allocation failure or a strange bucket.

markj: I still think I prefer having a separate function. The details of how we fill a bucket aren't really relevant to the core allocation algorithm. At least, please pick a more specific variable name than "max".

+	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
+	if (bucket == NULL) {
+		ZONE_LOCK(zone);
+		zone->uz_items -= max;
+		ZONE_UNLOCK(zone);
+		goto zalloc_item;
+	}
+
+	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
+	    MIN(bucket->ub_entries, max), domain, flags);
+
+	/*
+	 * Initialize the memory if necessary.
+	 */
+	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
+		int i;
+
+		for (i = 0; i < bucket->ub_cnt; i++)
+			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
+			    flags) != 0)
+				break;
+
+		/*
+		 * If we couldn't initialize the whole bucket, put the
+		 * rest back onto the freelist.
+		 */
+		if (i != bucket->ub_cnt) {
+			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
+			    bucket->ub_cnt - i);
+#ifdef INVARIANTS
+			bzero(&bucket->ub_bucket[i],
+			    sizeof(void *) * (bucket->ub_cnt - i));
+#endif
+			bucket->ub_cnt = i;
+		}
+	}
+
+	/*
+	 * Check if bucket_alloc() returned a strange bucket or
+	 * we reduced ub_cnt due to failed uz_init.
+	 */
+	if (bucket->ub_cnt < max) {
+		ZONE_LOCK(zone);
+		zone->uz_items -= max - bucket->ub_cnt;

markj: Should assert that zone->uz_items >= max - bucket->ub_cnt before the subtraction.

+		if (zone->uz_sleepers > 0 &&
+		    zone->uz_items < zone->uz_maxitems)
+			wakeup_one(zone);
+		ZONE_UNLOCK(zone);
+		if (bucket->ub_cnt == 0) {
+			bucket_free(zone, bucket, udata);
+			atomic_add_long(&zone->uz_fails, 1);
+			goto zalloc_item;
+		}
+	}
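The assertion markj requests would sit just before the rollback so an accounting underflow is caught under INVARIANTS. A sketch, with the panic message invented for illustration:

    if (bucket->ub_cnt < max) {
        ZONE_LOCK(zone);
        /* Catch an item-accounting underflow before it wraps. */
        KASSERT(zone->uz_items >= max - bucket->ub_cnt,
            ("%s: zone %p item accounting underflow", __func__, zone));
        zone->uz_items -= max - bucket->ub_cnt;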
 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
 	    zone->uz_name, zone, bucket);

 	if (bucket != NULL) {
 		ZONE_LOCK(zone);
 		critical_enter();
 		cpu = curcpu;

markj: Why do you not do a wakeup here?

 		cache = &zone->uz_cpu[cpu];

 		/*
 		 * See if we lost the race or were migrated.  Cache the
 		 * initialized bucket to make this less likely or claim
 		 * the memory directly.
 		 */
 		if (cache->uc_allocbucket == NULL &&
 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
 		    domain == PCPU_GET(domain))) {
 			cache->uc_allocbucket = bucket;
 			zdom->uzd_imax += bucket->ub_cnt;
-		} else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
+		} else if (zone->uz_bktcount >= zone->uz_bktmax) {
 			critical_exit();
 			ZONE_UNLOCK(zone);
 			bucket_drain(zone, bucket);
 			bucket_free(zone, bucket, udata);
 			goto zalloc_restart;
 		} else
 			zone_put_bucket(zone, zdom, bucket, false);
 		ZONE_UNLOCK(zone);
 		goto zalloc_start;
 	}

 	/*
 	 * We may not be able to get a bucket so return an actual item.
 	 */
 zalloc_item:
 	item = zone_alloc_item(zone, udata, domain, flags);

markj: Why not have it in zone_alloc_item_locked()? With the current diff, uma_large_malloc(), for example, can bypass limits.

 	return (item);
 }
 void *
 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 {

[26 lines elided]
 keg_first_slab(uma_keg_t keg, int domain, bool rr)
 {
 	uma_domain_t dom;
 	uma_slab_t slab;
 	int start;

 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("keg_first_slab: domain %d out of range", domain));
-	mtx_assert(&keg->uk_lock, MA_OWNED);

markj: Consider adding a KEG_LOCK_ASSERT()?

 	slab = NULL;
 	start = domain;
 	do {
 		dom = &keg->uk_domain[domain];
 		if (!LIST_EMPTY(&dom->ud_part_slab))
 			return (LIST_FIRST(&dom->ud_part_slab));
 		if (!LIST_EMPTY(&dom->ud_free_slab)) {

[60 lines elided; context: for (;;) {]
 	}

 	/*
 	 * M_NOVM means don't ask at all!
 	 */
 	if (flags & M_NOVM)
 		break;

-	if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
-		keg->uk_flags |= UMA_ZFLAG_FULL;
-		/*
-		 * If this is not a multi-zone, set the FULL bit.
-		 * Otherwise slab_multi() takes care of it.
-		 */
-		if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
-			zone->uz_flags |= UMA_ZFLAG_FULL;
-			zone_log_warning(zone);
-			zone_maxaction(zone);
-		}
-		if (flags & M_NOWAIT)
-			return (NULL);
-		zone->uz_sleeps++;
-		msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
-		continue;
-	}
+	KASSERT(zone->uz_maxitems == 0 ||
+	    zone->uz_items <= zone->uz_maxitems,
+	    ("%s: zone %p overflow", __func__, zone));
 	slab = keg_alloc_slab(keg, zone, domain, aflags);
 	/*
 	 * If we got a slab here it's safe to mark it partially used
 	 * and return.  We assume that the caller is going to remove
 	 * at least one item.
 	 */
 	if (slab) {
 		MPASS(slab->us_keg == keg);

[26 lines elided]
 }

 static uma_slab_t
 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
 {
 	uma_slab_t slab;

 	if (keg == NULL) {
-		keg = zone_first_keg(zone);
+		keg = zone->uz_keg;
 		KEG_LOCK(keg);
 	}

 	for (;;) {
 		slab = keg_fetch_slab(keg, zone, domain, flags);
 		if (slab)
 			return (slab);
 		if (flags & (M_NOWAIT | M_NOVM))
 			break;
 	}
 	KEG_UNLOCK(keg);
 	return (NULL);
 }
-/*
- * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
- * with the keg locked.  On NULL no lock is held.
- *
- * The last pointer is used to seed the search.  It is not required.
- */
-static uma_slab_t
-zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
-{
-	uma_klink_t klink;
-	uma_slab_t slab;
-	uma_keg_t keg;
-	int flags;
-	int empty;
-	int full;
-
-	/*
-	 * Don't wait on the first pass.  This will skip limit tests
-	 * as well.  We don't want to block if we can find a provider
-	 * without blocking.
-	 */
-	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
-	/*
-	 * Use the last slab allocated as a hint for where to start
-	 * the search.
-	 */
-	if (last != NULL) {
-		slab = keg_fetch_slab(last, zone, domain, flags);
-		if (slab)
-			return (slab);
-		KEG_UNLOCK(last);
-	}
-	/*
-	 * Loop until we have a slab incase of transient failures
-	 * while M_WAITOK is specified.  I'm not sure this is 100%
-	 * required but we've done it for so long now.
-	 */
-	for (;;) {
-		empty = 0;
-		full = 0;
-		/*
-		 * Search the available kegs for slabs.  Be careful to hold the
-		 * correct lock while calling into the keg layer.
-		 */
-		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
-			keg = klink->kl_keg;
-			KEG_LOCK(keg);
-			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
-				slab = keg_fetch_slab(keg, zone, domain, flags);
-				if (slab)
-					return (slab);
-			}
-			if (keg->uk_flags & UMA_ZFLAG_FULL)
-				full++;
-			else
-				empty++;
-			KEG_UNLOCK(keg);
-		}
-		if (rflags & (M_NOWAIT | M_NOVM))
-			break;
-		flags = rflags;
-		/*
-		 * All kegs are full.  XXX We can't atomically check all kegs
-		 * and sleep so just sleep for a short period and retry.
-		 */
-		if (full && !empty) {
-			ZONE_LOCK(zone);
-			zone->uz_flags |= UMA_ZFLAG_FULL;
-			zone->uz_sleeps++;
-			zone_log_warning(zone);
-			zone_maxaction(zone);
-			msleep(zone, zone->uz_lockptr, PVM,
-			    "zonelimit", hz/100);
-			zone->uz_flags &= ~UMA_ZFLAG_FULL;
-			ZONE_UNLOCK(zone);
-			continue;
-		}
-	}
-	return (NULL);
-}
 static void *
 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 {
 	uma_domain_t dom;
 	void *item;
 	uint8_t freei;

 	MPASS(keg == slab->us_keg);

[58 lines elided]
 		flags |= M_NOWAIT;
 	}

 	if (slab != NULL)
 		KEG_UNLOCK(keg);

 	return i;
 }
-static uma_bucket_t
-zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
-{
-	uma_bucket_t bucket;
-	int max;
-
-	CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain);
-
-	/* Don't wait for buckets, preserve caller's NOVM setting. */
-	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
-	if (bucket == NULL)
-		return (NULL);
-
-	max = MIN(bucket->ub_entries, zone->uz_count);
-	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
-	    max, domain, flags);
-
-	/*
-	 * Initialize the memory if necessary.
-	 */
-	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
-		int i;
-
-		for (i = 0; i < bucket->ub_cnt; i++)
-			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
-			    flags) != 0)
-				break;
-
-		/*
-		 * If we couldn't initialize the whole bucket, put the
-		 * rest back onto the freelist.
-		 */
-		if (i != bucket->ub_cnt) {
-			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
-			    bucket->ub_cnt - i);
-#ifdef INVARIANTS
-			bzero(&bucket->ub_bucket[i],
-			    sizeof(void *) * (bucket->ub_cnt - i));
-#endif
-			bucket->ub_cnt = i;
-		}
-	}
-
-	if (bucket->ub_cnt == 0) {
-		bucket_free(zone, bucket, udata);
-		atomic_add_long(&zone->uz_fails, 1);
-		return (NULL);
-	}
-
-	return (bucket);
-}
 /*
  * Allocates a single item from a zone.
  *
  * Arguments
  *	zone   The zone to alloc for.
  *	udata  The data to be passed to the constructor.
  *	domain The domain to allocate from or UMA_ANYDOMAIN.
  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
  *
  * Returns
  *	NULL if there is no memory and M_NOWAIT is set
  *	An item if successful
  */
 static void *
 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
 {
 	void *item;
 #ifdef INVARIANTS
 	bool skipdbg;
 #endif
-	item = NULL;
+	ZONE_LOCK(zone);

markj: I would comment, /* Returns with the zone unlocked. */

+	if (zone->uz_maxitems && zone->uz_items >= zone->uz_maxitems) {
+		zone_log_warning(zone);
+		zone_maxaction(zone);
+		if (flags & M_NOWAIT) {
+			ZONE_UNLOCK(zone);
+			return (NULL);
+		}
+		zone->uz_sleeps++;
+		zone->uz_sleepers++;
+		msleep(zone, zone->uz_lockptr, PVM, "zonelimit", 0);
+		KASSERT(zone->uz_items < zone->uz_maxitems,
+		    ("%s: woke up with full zone %p", __func__, zone));
+		zone->uz_sleepers--;
+	}
+	zone->uz_items++;

markj: I'd find this clearer as max_items - items > 1.

glebius: For me your suggestion looks more obfuscated rather than clearer.

+	zone->uz_allocs++;
+	if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems)
+		wakeup_one(zone);
+	ZONE_UNLOCK(zone);

markj: The extra locking is rather unfortunate. Why do we do the check here and not somewhere where we already hold the zone lock?

glebius: Because uz_import is not always zone_import(). For vm_page zones uz_import is outside of UMA and doesn't lock the zone. Right before uz_import is the only point where we can do bookkeeping of allocations. The performance impact here is not measurable. We are already at a slow path and are about to acquire some kind of lock in uz_import. For regular zones, the same lock, so same cache line. Also, the ratio of fastpath allocs to slowpath is expected to be super low. For example for vm_page we got one slowpath alloc for 10^7 immediate returns from per-CPU cache with zero jumps to zalloc_start.

 	if (domain != UMA_ANYDOMAIN) {
 		/* avoid allocs targeting empty domains */
 		if (VM_DOMAIN_EMPTY(domain))
 			domain = UMA_ANYDOMAIN;
 	}
+	item = NULL;

markj: What problem does this line fix?

 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
 		goto fail;
-	atomic_add_long(&zone->uz_allocs, 1);

 #ifdef INVARIANTS
 	skipdbg = uma_dbg_zskip(zone, item);
 #endif

[24 lines elided]
 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
 	    zone->uz_name, zone);

 	return (item);

 fail:
 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
 	    zone->uz_name, zone);
+	ZONE_LOCK(zone);
+	zone->uz_items--;
+	zone->uz_allocs--;
+	ZONE_UNLOCK(zone);
 	atomic_add_long(&zone->uz_fails, 1);
 	return (NULL);
 }
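markj's remarks here and at the zalloc_item label in uma_zalloc_arg() point toward splitting this function so the limit accounting lives in a variant that is entered with the zone lock already held. A minimal sketch of such a split; zone_alloc_item_locked is the name markj suggests, not code in this diff:

/* Returns with the zone unlocked. */
static void *
zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
{
    /* ... limit check, uz_items/uz_allocs accounting, then import ... */
}

static void *
zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
{

    ZONE_LOCK(zone);
    return (zone_alloc_item_locked(zone, udata, domain, flags));
}

Callers that already hold the lock, such as the bucket refill path, would call the locked variant directly and so could not bypass the limits.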
 /* See uma.h */
 void
 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 {
 	uma_cache_t cache;
 	uma_bucket_t bucket;
 	uma_zone_domain_t zdom;
-	int cpu, domain, lockfail;
+	int cpu, domain;
+	bool lockfail, locked;
 #ifdef INVARIANTS
 	bool skipdbg;
 #endif

 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,

[26 lines elided]
 	if (zone->uz_dtor != NULL && (!skipdbg ||
 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
 #else
 	if (zone->uz_dtor != NULL)
 #endif
 		zone->uz_dtor(item, zone->uz_size, udata);

-	/*
-	 * The race here is acceptable.  If we miss it we'll just have to wait
-	 * a little longer for the limits to be reset.
-	 */
-	if (zone->uz_flags & UMA_ZFLAG_FULL)
-		goto zfree_item;
-
 	/*
 	 * If possible, free to the per-CPU cache.  There are two
 	 * requirements for safe access to the per-CPU cache: (1) the thread
 	 * accessing the cache must not be preempted or yield during access,
 	 * and (2) the thread must not migrate CPUs without switching which
 	 * cache it accesses.  We rely on a critical section to prevent
 	 * preemption and migration.  We release the critical section in
 	 * order to acquire the zone mutex if we are unable to free to the
 	 * current cache; when we re-acquire the critical section, we must
 	 * detect and handle migration if it has occurred.
 	 */
+	locked = false;
 zfree_restart:
 	critical_enter();
 	cpu = curcpu;
 	cache = &zone->uz_cpu[cpu];
 zfree_start:
 	/*
 	 * Try to free into the allocbucket first to give LIFO ordering
 	 * for cache-hot datastructures.  Spill over into the freebucket
 	 * if necessary.  Alloc will swap them if one runs dry.
 	 */
 	bucket = cache->uc_allocbucket;
 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
 		bucket = cache->uc_freebucket;
 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 		    ("uma_zfree: Freeing to non free bucket index."));
 		bucket->ub_bucket[bucket->ub_cnt] = item;
 		bucket->ub_cnt++;
 		cache->uc_frees++;
 		critical_exit();
+		if (locked)
+			ZONE_UNLOCK(zone);
 		return;
 	}
 	/*
 	 * We must go back the zone, which requires acquiring the zone lock,
 	 * which in turn means we must release and re-acquire the critical
 	 * section.  Since the critical section is released, we may be
 	 * preempted or migrate.  As such, make sure not to maintain any
 	 * thread-local state specific to the cache from prior to releasing
 	 * the critical section.
 	 */
 	critical_exit();
 	if (zone->uz_count == 0 || bucketdisable)
 		goto zfree_item;

-	lockfail = 0;
+	lockfail = false;
+	if (!locked) {
 		if (ZONE_TRYLOCK(zone) == 0) {
 			/* Record contention to size the buckets. */
 			ZONE_LOCK(zone);
-			lockfail = 1;
+			lockfail = true;
 		}
+		locked = true;
+	}
+
+	/*
+	 * Now we got the lock, check for sleepers and give a chance to
+	 * first one to allocate.  If item will end up on CPU cache,
+	 * and they will wake up on wrong CPU, then they will go back
+	 * to sleep.
+	 */
+	if (zone->uz_sleepers)
+		wakeup_one(zone);

markj: Explicit comparison: uz_sleepers > 0.

 	critical_enter();
 	cpu = curcpu;
 	cache = &zone->uz_cpu[cpu];

 	bucket = cache->uc_freebucket;
-	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
-		ZONE_UNLOCK(zone);
+	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries)
 		goto zfree_start;
-	}

markj (unsubmitted): What's the point of deferring the unlock? We hold a critical section here and there's a free slot in uc_freebucket, so we're guaranteed to succeed during the restart. In all other cases we are jumping back with locked == false, right?
 	cache->uc_freebucket = NULL;
 	/* We are no longer associated with this CPU. */
 	critical_exit();

 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
 		domain = PCPU_GET(domain);
 		if (VM_DOMAIN_EMPTY(domain))
 			domain = UMA_ANYDOMAIN;
 	} else
 		domain = 0;
 	zdom = &zone->uz_domain[0];

 	/* Can we throw this on the zone full list? */
 	if (bucket != NULL) {
 		CTR3(KTR_UMA,
 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
 		    zone->uz_name, zone, bucket);
 		/* ub_cnt is pointing to the last free item */
-		KASSERT(bucket->ub_cnt != 0,
-		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
+		KASSERT(bucket->ub_cnt == bucket->ub_entries,
+		    ("uma_zfree: Attempting to insert not full bucket onto the full list.\n"));
-		if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
+		if (zone->uz_bktcount >= zone->uz_bktmax) {
 			ZONE_UNLOCK(zone);
+			locked = false;
 			bucket_drain(zone, bucket);
 			bucket_free(zone, bucket, udata);
 			goto zfree_restart;
 		} else
 			zone_put_bucket(zone, zdom, bucket, true);
 	}
/* | /* | ||||
* We bump the uz count when the cache size is insufficient to | * We bump the uz count when the cache size is insufficient to | ||||
* handle the working set. | * handle the working set. | ||||
*/ | */ | ||||
if (lockfail && zone->uz_count < BUCKET_MAX) | if (lockfail && zone->uz_count < zone->uz_count_max) | ||||
zone->uz_count++; | zone->uz_count++; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
locked = false; | |||||
bucket = bucket_alloc(zone, udata, M_NOWAIT); | bucket = bucket_alloc(zone, udata, M_NOWAIT); | ||||
CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", | CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
if (bucket) { | if (bucket) { | ||||
critical_enter(); | critical_enter(); | ||||
cpu = curcpu; | cpu = curcpu; | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
Show All 11 Lines | if (bucket) { | ||||
bucket_free(zone, bucket, udata); | bucket_free(zone, bucket, udata); | ||||
goto zfree_restart; | goto zfree_restart; | ||||
} | } | ||||
/* | /* | ||||
* If nothing else caught this, we'll just do an internal free. | * If nothing else caught this, we'll just do an internal free. | ||||
*/ | */ | ||||
zfree_item: | zfree_item: | ||||
if (locked) | |||||
ZONE_UNLOCK(zone); | |||||
zone_free_item(zone, item, udata, SKIP_DTOR); | zone_free_item(zone, item, udata, SKIP_DTOR); | ||||
return; | |||||
} | } | ||||
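The free path above records ZONE_TRYLOCK failures in lockfail and, once the lock is finally held, grows uz_count toward uz_count_max, so contended zones get bigger buckets and visit the zone lock less often. Below is a minimal userspace sketch of that feedback pattern, assuming pthreads; the struct and function names are invented for the demo, and only the trylock-then-grow logic mirrors uma_zfree_arg().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct zone {
	pthread_mutex_t	lock;
	int		count;		/* cf. uz_count */
	int		count_max;	/* cf. uz_count_max */
};

static void
zone_lock_adaptive(struct zone *z)
{
	bool lockfail = false;

	if (pthread_mutex_trylock(&z->lock) != 0) {
		/* Record contention, then wait for the lock. */
		lockfail = true;
		pthread_mutex_lock(&z->lock);
	}
	/* Larger buckets mean fewer trips to this lock. */
	if (lockfail && z->count < z->count_max)
		z->count++;
	pthread_mutex_unlock(&z->lock);
}

int
main(void)
{
	struct zone z = { PTHREAD_MUTEX_INITIALIZER, 4, 256 };

	zone_lock_adaptive(&z);		/* uncontended: count stays 4 */
	printf("count %d\n", z.count);
	return (0);
}

The growth is one step per contended acquisition, so bucket sizes adapt gradually instead of overshooting.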
void | void | ||||
uma_zfree_domain(uma_zone_t zone, void *item, void *udata) | uma_zfree_domain(uma_zone_t zone, void *item, void *udata) | ||||
{ | { | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, | CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, | ||||
zone->uz_name); | zone->uz_name); | ||||
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | ||||
("uma_zfree_domain: called with spinlock or critical section held")); | ("uma_zfree_domain: called with spinlock or critical section held")); | ||||
/* uma_zfree(..., NULL) does nothing, to match free(9). */ | /* uma_zfree(..., NULL) does nothing, to match free(9). */ | ||||
if (item == NULL) | if (item == NULL) | ||||
return; | return; | ||||
zone_free_item(zone, item, udata, SKIP_NONE); | zone_free_item(zone, item, udata, SKIP_NONE); | ||||
} | } | ||||
static void | static void | ||||
slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) | slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item) | ||||
{ | { | ||||
uma_keg_t keg; | |||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uint8_t freei; | uint8_t freei; | ||||
keg = zone->uz_keg; | |||||
MPASS(zone->uz_lockptr == &keg->uk_lock); | |||||
mtx_assert(&keg->uk_lock, MA_OWNED); | mtx_assert(&keg->uk_lock, MA_OWNED); | ||||
MPASS(keg == slab->us_keg); | MPASS(keg == slab->us_keg); | ||||
dom = &keg->uk_domain[slab->us_domain]; | dom = &keg->uk_domain[slab->us_domain]; | ||||
/* Do we need to remove from any lists? */ | /* Do we need to remove from any lists? */ | ||||
if (slab->us_freecount+1 == keg->uk_ipers) { | if (slab->us_freecount+1 == keg->uk_ipers) { | ||||
LIST_REMOVE(slab, us_link); | LIST_REMOVE(slab, us_link); | ||||
Show All 14 Lines | |||||
static void | static void | ||||
zone_release(uma_zone_t zone, void **bucket, int cnt) | zone_release(uma_zone_t zone, void **bucket, int cnt) | ||||
{ | { | ||||
void *item; | void *item; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
uint8_t *mem; | uint8_t *mem; | ||||
int clearfull; | |||||
int i; | int i; | ||||
clearfull = 0; | keg = zone->uz_keg; | ||||
keg = zone_first_keg(zone); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
for (i = 0; i < cnt; i++) { | for (i = 0; i < cnt; i++) { | ||||
item = bucket[i]; | item = bucket[i]; | ||||
if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { | if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { | ||||
mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); | mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); | ||||
if (zone->uz_flags & UMA_ZONE_HASH) { | if (zone->uz_flags & UMA_ZONE_HASH) { | ||||
slab = hash_sfind(&keg->uk_hash, mem); | slab = hash_sfind(&keg->uk_hash, mem); | ||||
} else { | } else { | ||||
mem += keg->uk_pgoff; | mem += keg->uk_pgoff; | ||||
slab = (uma_slab_t)mem; | slab = (uma_slab_t)mem; | ||||
} | } | ||||
} else { | } else { | ||||
slab = vtoslab((vm_offset_t)item); | slab = vtoslab((vm_offset_t)item); | ||||
if (slab->us_keg != keg) { | MPASS(slab->us_keg == keg); | ||||
KEG_UNLOCK(keg); | |||||
keg = slab->us_keg; | |||||
KEG_LOCK(keg); | |||||
} | } | ||||
slab_free_item(zone, slab, item); | |||||
if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems) | |||||
wakeup_one(zone); | |||||
} | } | ||||
slab_free_item(keg, slab, item); | |||||
if (keg->uk_flags & UMA_ZFLAG_FULL) { | |||||
if (keg->uk_pages < keg->uk_maxpages) { | |||||
keg->uk_flags &= ~UMA_ZFLAG_FULL; | |||||
clearfull = 1; | |||||
} | |||||
/* | |||||
* We can handle one more allocation. Since we're | |||||
* clearing ZFLAG_FULL, wake up all procs blocked | |||||
* on pages. This should be uncommon, so keeping this | |||||
* simple for now (rather than adding count of blocked | |||||
* threads etc). | |||||
*/ | |||||
wakeup(keg); | |||||
} | |||||
} | |||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
if (clearfull) { | |||||
ZONE_LOCK(zone); | |||||
zone->uz_flags &= ~UMA_ZFLAG_FULL; | |||||
wakeup(zone); | |||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
} | |||||
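zone_release() locates each item's slab header either with vtoslab() or, when the header lives in the slab's own page, by masking the item pointer down to a page boundary and adding uk_pgoff. A self-contained userspace sketch of the masking step; PAGE_SIZE and SLAB_MASK here are illustrative stand-ins for the kernel's UMA_SLAB_MASK.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096			/* illustrative */
#define SLAB_MASK	(PAGE_SIZE - 1)		/* cf. UMA_SLAB_MASK */

int
main(void)
{
	void *page, *item;
	uintptr_t base;

	/* A page-aligned "slab" with an item somewhere inside it. */
	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE) != 0)
		return (1);
	item = (char *)page + 1234;

	/* Mask the low bits to recover the slab's base address. */
	base = (uintptr_t)item & ~(uintptr_t)SLAB_MASK;
	printf("recovered base matches: %s\n",
	    base == (uintptr_t)page ? "yes" : "no");

	free(page);
	return (0);
}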
/* | /* | ||||
* Frees a single item to any zone. | * Frees a single item to any zone. | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* zone The zone to free to | * zone The zone to free to | ||||
* item The item we're freeing | * item The item we're freeing | ||||
* udata User supplied data for the dtor | * udata User supplied data for the dtor | ||||
* skip Skip dtors and finis | * skip Skip dtors and finis | ||||
Show All 18 Lines | |||||
#else | #else | ||||
if (skip < SKIP_DTOR && zone->uz_dtor != NULL) | if (skip < SKIP_DTOR && zone->uz_dtor != NULL) | ||||
#endif | #endif | ||||
zone->uz_dtor(item, zone->uz_size, udata); | zone->uz_dtor(item, zone->uz_size, udata); | ||||
if (skip < SKIP_FINI && zone->uz_fini) | if (skip < SKIP_FINI && zone->uz_fini) | ||||
zone->uz_fini(item, zone->uz_size); | zone->uz_fini(item, zone->uz_size); | ||||
atomic_add_long(&zone->uz_frees, 1); | |||||
zone->uz_release(zone->uz_arg, &item, 1); | zone->uz_release(zone->uz_arg, &item, 1); | ||||
ZONE_LOCK(zone); | |||||
zone->uz_frees++; | |||||
zone->uz_items--; | |||||
if (zone->uz_sleepers && zone->uz_items < zone->uz_maxitems) | |||||
wakeup_one(zone); | |||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_set_max(uma_zone_t zone, int nitems) | uma_zone_set_max(uma_zone_t zone, int nitems) | ||||
{ | { | ||||
uma_keg_t keg; | struct uma_bucket_zone *ubz; | ||||
keg = zone_first_keg(zone); | /* | ||||
if (keg == NULL) | * If the limit is very low, we may need to limit how | ||||
return (0); | * many items are allowed in CPU caches. | ||||
KEG_LOCK(keg); | */ | ||||
keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; | ubz = &bucket_zones[0]; | ||||
if (keg->uk_maxpages * keg->uk_ipers < nitems) | for (; ubz->ubz_entries != 0; ubz++) | ||||
keg->uk_maxpages += keg->uk_ppera; | if (ubz->ubz_entries * 2 * mp_ncpus > nitems) | ||||
nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; | break; | ||||
KEG_UNLOCK(keg); | if (ubz == &bucket_zones[0]) | ||||
nitems = ubz->ubz_entries * 2 * mp_ncpus; | |||||
markj: It is weird to me that the patch is being so careful in general to do precise accounting, but then just adjusts the caller's request here. Why not simply disable per-CPU caching in this case? |
glebius: Without per-CPU caching UMA isn't a useful allocator at all. Note that uma_zone_set_max() never claimed to put a hard cap on a zone. With the new code we get closer to the requested cap than we did before. |
markj: And yet UMA_NOBUCKET exists. :) I pointed this out because you are using UMA zones to manage pbufs, and the existing allocator is precise. For certain uses, where the max is a function of the amount of physical memory, it may be that we don't care about per-CPU caching at all on small systems. |
glebius: UMA_NOBUCKET doesn't turn off CPU caches. The existing allocator is precise, but the maximums provided to it are very rough guesses. "Mkay, 1/2 nswbufs might not be enough, but 3/4 would be fine..." For pbufs the potential adjustment done by uma_zone_set_max() is entirely okay. I even see it as autotuning: the more CPUs a machine has, the more buffers it will have. |
markj: Are you thinking of UMA_NOBUCKETCACHE? That is a different flag (and I'm glad it's gone with this change). It is a bug if UMA_NOBUCKET _doesn't_ turn off per-CPU caches. |
glebius: Ah, sorry. Yes, I meant UMA_NOBUCKETCACHE. |
markj: I still don't really like this, but don't object to it. |
else | |||||
ubz--; | |||||
ZONE_LOCK(zone); | |||||
zone->uz_count_max = zone->uz_count = ubz->ubz_entries; | |||||
if (zone->uz_count_min > zone->uz_count_max) | |||||
zone->uz_count_min = zone->uz_count_max; | |||||
zone->uz_maxitems = nitems; | |||||
ZONE_UNLOCK(zone); | |||||
return (nitems); | return (nitems); | ||||
} | } | ||||
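To make the sizing loop above concrete: it scans the bucket-size table for the first bucket whose worst-case per-CPU footprint (two full buckets, alloc and free, on every CPU, i.e. 2 * entries * mp_ncpus items) exceeds nitems, then steps back one entry; if even the smallest bucket exceeds the request, nitems itself is raised, which is why the function returns a possibly adjusted value. A userspace sketch, under the assumption that the size table mirrors UMA's bucket_zones[]:

#include <stdio.h>

/* Assumed stand-in for UMA's bucket_zones[] entry sizes (0-terminated). */
static const int bucket_sizes[] = { 4, 6, 8, 12, 16, 32, 64, 128, 256, 0 };

/*
 * Mirror of the loop in uma_zone_set_max(): choose the largest bucket
 * whose worst case (two full buckets on every CPU) fits under *nitems,
 * raising *nitems when even the smallest bucket does not fit.
 */
static int
pick_bucket_size(int ncpus, int *nitems)
{
	const int *bs;

	for (bs = bucket_sizes; *bs != 0; bs++)
		if (*bs * 2 * ncpus > *nitems)
			break;
	if (bs == bucket_sizes)
		*nitems = *bs * 2 * ncpus;	/* adjust the caller's cap */
	else
		bs--;				/* step back to the fit */
	return (*bs);
}

int
main(void)
{
	int nitems = 100;

	/* 4 CPUs: 12 * 2 * 4 = 96 <= 100 < 16 * 2 * 4 = 128 -> bucket 12. */
	printf("bucket %d, cap %d\n", pick_bucket_size(4, &nitems), nitems);
	return (0);
}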
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_set_maxcache(uma_zone_t zone, int nitems) | |||||
{ | |||||
ZONE_LOCK(zone); | |||||
zone->uz_bktmax = nitems; | |||||
ZONE_UNLOCK(zone); | |||||
return (nitems); | |||||
} | |||||
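uma_zone_set_maxcache() is the knob that takes over from the old UMA_ZONE_NOBUCKETCACHE flag: uz_bktmax bounds uz_bktcount, and once the bound is reached the free path drains full buckets instead of queueing them. A hypothetical kernel-side fragment, not from this patch (the zone name and struct are invented):

/*
 * Hypothetical caller: keep at most 64 items in the zone's full-bucket
 * cache.  Passing 0 makes uma_zfree_arg() drain every full bucket
 * immediately (uz_bktcount >= uz_bktmax is then always true), which is
 * what UMA_ZONE_NOBUCKETCACHE used to provide.
 */
zone = uma_zcreate("example", sizeof(struct example),
    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
uma_zone_set_maxcache(zone, 64);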
/* See uma.h */ | |||||
int | |||||
uma_zone_get_max(uma_zone_t zone) | uma_zone_get_max(uma_zone_t zone) | ||||
{ | { | ||||
int nitems; | int nitems; | ||||
uma_keg_t keg; | |||||
keg = zone_first_keg(zone); | ZONE_LOCK(zone); | ||||
if (keg == NULL) | nitems = zone->uz_maxitems; | ||||
return (0); | ZONE_UNLOCK(zone); | ||||
KEG_LOCK(keg); | |||||
nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; | |||||
KEG_UNLOCK(keg); | |||||
return (nitems); | return (nitems); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_warning(uma_zone_t zone, const char *warning) | uma_zone_set_warning(uma_zone_t zone, const char *warning) | ||||
{ | { | ||||
Show All 37 Lines | |||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_init(uma_zone_t zone, uma_init uminit) | uma_zone_set_init(uma_zone_t zone, uma_init uminit) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
KASSERT(keg->uk_pages == 0, | KASSERT(keg->uk_pages == 0, | ||||
("uma_zone_set_init on non-empty keg")); | ("uma_zone_set_init on non-empty keg")); | ||||
keg->uk_init = uminit; | keg->uk_init = uminit; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_fini(uma_zone_t zone, uma_fini fini) | uma_zone_set_fini(uma_zone_t zone, uma_fini fini) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
KASSERT(keg->uk_pages == 0, | KASSERT(keg->uk_pages == 0, | ||||
("uma_zone_set_fini on non-empty keg")); | ("uma_zone_set_fini on non-empty keg")); | ||||
keg->uk_fini = fini; | keg->uk_fini = fini; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) | uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) | ||||
{ | { | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
KASSERT(zone_first_keg(zone)->uk_pages == 0, | KASSERT(zone->uz_keg->uk_pages == 0, | ||||
("uma_zone_set_zinit on non-empty keg")); | ("uma_zone_set_zinit on non-empty keg")); | ||||
zone->uz_init = zinit; | zone->uz_init = zinit; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) | uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) | ||||
{ | { | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
KASSERT(zone_first_keg(zone)->uk_pages == 0, | KASSERT(zone->uz_keg->uk_pages == 0, | ||||
("uma_zone_set_zfini on non-empty keg")); | ("uma_zone_set_zfini on non-empty keg")); | ||||
zone->uz_fini = zfini; | zone->uz_fini = zfini; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
/* XXX uk_freef is not actually used with the zone locked */ | /* XXX uk_freef is not actually used with the zone locked */ | ||||
void | void | ||||
uma_zone_set_freef(uma_zone_t zone, uma_free freef) | uma_zone_set_freef(uma_zone_t zone, uma_free freef) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); | KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_freef = freef; | keg->uk_freef = freef; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
/* XXX uk_allocf is not actually used with the zone locked */ | /* XXX uk_allocf is not actually used with the zone locked */ | ||||
void | void | ||||
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) | uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_allocf = allocf; | keg->uk_allocf = allocf; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_reserve(uma_zone_t zone, int items) | uma_zone_reserve(uma_zone_t zone, int items) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return; | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_reserve = items; | keg->uk_reserve = items; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
return; | return; | ||||
markj: Since you did it elsewhere, get rid of this return statement too? |
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_reserve_kva(uma_zone_t zone, int count) | uma_zone_reserve_kva(uma_zone_t zone, int count) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
vm_offset_t kva; | vm_offset_t kva; | ||||
u_int pages; | u_int pages; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return (0); | |||||
pages = count / keg->uk_ipers; | pages = count / keg->uk_ipers; | ||||
if (pages * keg->uk_ipers < count) | if (pages * keg->uk_ipers < count) | ||||
pages++; | pages++; | ||||
pages *= keg->uk_ppera; | pages *= keg->uk_ppera; | ||||
#ifdef UMA_MD_SMALL_ALLOC | #ifdef UMA_MD_SMALL_ALLOC | ||||
if (keg->uk_ppera > 1) { | if (keg->uk_ppera > 1) { | ||||
#else | #else | ||||
if (1) { | if (1) { | ||||
#endif | #endif | ||||
kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | ||||
if (kva == 0) | if (kva == 0) | ||||
return (0); | return (0); | ||||
} else | } else | ||||
kva = 0; | kva = 0; | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_kva = kva; | keg->uk_kva = kva; | ||||
keg->uk_offset = 0; | keg->uk_offset = 0; | ||||
keg->uk_maxpages = pages; | |||||
#ifdef UMA_MD_SMALL_ALLOC | #ifdef UMA_MD_SMALL_ALLOC | ||||
keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | ||||
#else | #else | ||||
keg->uk_allocf = noobj_alloc; | keg->uk_allocf = noobj_alloc; | ||||
#endif | #endif | ||||
keg->uk_flags |= UMA_ZONE_NOFREE; | keg->uk_flags |= UMA_ZONE_NOFREE; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
return (1); | return (1); | ||||
} | } | ||||
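The pages computation above is a plain round-up: enough whole slabs to hold count items, scaled to pages per slab; it is equivalent to howmany(count, keg->uk_ipers) * keg->uk_ppera. A standalone check of the arithmetic, with illustrative numbers:

#include <stdio.h>

/*
 * Round items up to whole slabs, then convert slabs to pages; this is
 * the same computation as in uma_zone_reserve_kva() above.
 */
static unsigned
items_to_pages(unsigned count, unsigned ipers, unsigned ppera)
{
	unsigned pages;

	pages = count / ipers;
	if (pages * ipers < count)	/* a partial slab still needs space */
		pages++;
	return (pages * ppera);
}

int
main(void)
{
	/* 100 items at 42 per slab, one page per slab -> 3 pages. */
	printf("%u pages\n", items_to_pages(100, 42, 1));
	return (0);
}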
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_prealloc(uma_zone_t zone, int items) | uma_prealloc(uma_zone_t zone, int items) | ||||
{ | { | ||||
struct vm_domainset_iter di; | struct vm_domainset_iter di; | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
int domain, flags, slabs; | int domain, flags, slabs; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return; | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
slabs = items / keg->uk_ipers; | slabs = items / keg->uk_ipers; | ||||
if (slabs * keg->uk_ipers < items) | if (slabs * keg->uk_ipers < items) | ||||
slabs++; | slabs++; | ||||
flags = M_WAITOK; | flags = M_WAITOK; | ||||
vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); | vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); | ||||
while (slabs-- > 0) { | while (slabs-- > 0) { | ||||
slab = keg_alloc_slab(keg, zone, domain, flags); | slab = keg_alloc_slab(keg, zone, domain, flags); | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | |||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_exhausted(uma_zone_t zone) | uma_zone_exhausted(uma_zone_t zone) | ||||
{ | { | ||||
int full; | int full; | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
full = (zone->uz_flags & UMA_ZFLAG_FULL); | full = (zone->uz_sleepers > 0); | ||||
markj: The parens are not needed anymore. |
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
return (full); | return (full); | ||||
} | } | ||||
int | int | ||||
uma_zone_exhausted_nolock(uma_zone_t zone) | uma_zone_exhausted_nolock(uma_zone_t zone) | ||||
{ | { | ||||
return (zone->uz_flags & UMA_ZFLAG_FULL); | return (zone->uz_sleepers > 0); | ||||
} | } | ||||
void * | void * | ||||
uma_large_malloc_domain(vm_size_t size, int domain, int wait) | uma_large_malloc_domain(vm_size_t size, int domain, int wait) | ||||
{ | { | ||||
struct domainset *policy; | struct domainset *policy; | ||||
vm_offset_t addr; | vm_offset_t addr; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
uma_print_keg(uma_keg_t keg) | uma_print_keg(uma_keg_t keg) | ||||
{ | { | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
int i; | int i; | ||||
printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " | printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " | ||||
"out %d free %d limit %d\n", | "out %d free %d\n", | ||||
keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, | keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, | ||||
keg->uk_ipers, keg->uk_ppera, | keg->uk_ipers, keg->uk_ppera, | ||||
(keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, | (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, | ||||
keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); | keg->uk_free); | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
dom = &keg->uk_domain[i]; | dom = &keg->uk_domain[i]; | ||||
printf("Part slabs:\n"); | printf("Part slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_part_slab, us_link) | LIST_FOREACH(slab, &dom->ud_part_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
printf("Free slabs:\n"); | printf("Free slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_free_slab, us_link) | LIST_FOREACH(slab, &dom->ud_free_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
printf("Full slabs:\n"); | printf("Full slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_full_slab, us_link) | LIST_FOREACH(slab, &dom->ud_full_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
} | } | ||||
} | } | ||||
void | void | ||||
uma_print_zone(uma_zone_t zone) | uma_print_zone(uma_zone_t zone) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_klink_t kl; | |||||
int i; | int i; | ||||
printf("zone: %s(%p) size %d flags %#x\n", | printf("zone: %s(%p) size %d maxitems %lu flags %#x\n", | ||||
zone->uz_name, zone, zone->uz_size, zone->uz_flags); | zone->uz_name, zone, zone->uz_size, zone->uz_maxitems, | ||||
LIST_FOREACH(kl, &zone->uz_kegs, kl_link) | zone->uz_flags); | ||||
uma_print_keg(kl->kl_keg); | if (zone->uz_lockptr != &zone->uz_lock) | ||||
uma_print_keg(zone->uz_keg); | |||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
cache = &zone->uz_cpu[i]; | cache = &zone->uz_cpu[i]; | ||||
printf("CPU %d Cache:\n", i); | printf("CPU %d Cache:\n", i); | ||||
cache_print(cache); | cache_print(cache); | ||||
} | } | ||||
} | } | ||||
#ifdef DDB | #ifdef DDB | ||||
▲ Show 20 Lines • Show All 62 Lines • ▼ Show 20 Lines | |||||
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct uma_stream_header ush; | struct uma_stream_header ush; | ||||
struct uma_type_header uth; | struct uma_type_header uth; | ||||
struct uma_percpu_stat *ups; | struct uma_percpu_stat *ups; | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
struct sbuf sbuf; | struct sbuf sbuf; | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_klink_t kl; | |||||
uma_keg_t kz; | uma_keg_t kz; | ||||
uma_zone_t z; | uma_zone_t z; | ||||
uma_keg_t k; | |||||
int count, error, i; | int count, error, i; | ||||
error = sysctl_wire_old_buffer(req, 0); | error = sysctl_wire_old_buffer(req, 0); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | ||||
sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); | sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); | ||||
ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); | ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); | ||||
Show All 17 Lines | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
LIST_FOREACH(kz, &uma_kegs, uk_link) { | LIST_FOREACH(kz, &uma_kegs, uk_link) { | ||||
LIST_FOREACH(z, &kz->uk_zones, uz_link) { | LIST_FOREACH(z, &kz->uk_zones, uz_link) { | ||||
bzero(&uth, sizeof(uth)); | bzero(&uth, sizeof(uth)); | ||||
ZONE_LOCK(z); | ZONE_LOCK(z); | ||||
strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | ||||
uth.uth_align = kz->uk_align; | uth.uth_align = kz->uk_align; | ||||
uth.uth_size = kz->uk_size; | uth.uth_size = kz->uk_size; | ||||
uth.uth_rsize = kz->uk_rsize; | uth.uth_rsize = kz->uk_rsize; | ||||
LIST_FOREACH(kl, &z->uz_kegs, kl_link) { | uth.uth_pages += (z->uz_items / kz->uk_ipers) * | ||||
k = kl->kl_keg; | kz->uk_ppera; | ||||
uth.uth_maxpages += k->uk_maxpages; | uth.uth_maxpages += (z->uz_maxitems / kz->uk_ipers) * | ||||
uth.uth_pages += k->uk_pages; | kz->uk_ppera; | ||||
uth.uth_keg_free += k->uk_free; | uth.uth_limit = z->uz_maxitems; | ||||
uth.uth_limit = (k->uk_maxpages / k->uk_ppera) | uth.uth_keg_free += z->uz_keg->uk_free; | ||||
* k->uk_ipers; | |||||
} | |||||
/* | /* | ||||
* A zone is secondary if it is not the first entry | * A zone is secondary if it is not the first entry | ||||
* on the keg's zone list. | * on the keg's zone list. | ||||
*/ | */ | ||||
if ((z->uz_flags & UMA_ZONE_SECONDARY) && | if ((z->uz_flags & UMA_ZONE_SECONDARY) && | ||||
(LIST_FIRST(&kz->uk_zones) != z)) | (LIST_FIRST(&kz->uk_zones) != z)) | ||||
uth.uth_zone_flags = UTH_ZONE_SECONDARY; | uth.uth_zone_flags = UTH_ZONE_SECONDARY; | ||||
▲ Show 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | uma_dbg_getslab(uma_zone_t zone, void *item) | ||||
if (zone->uz_flags & UMA_ZONE_VTOSLAB) { | if (zone->uz_flags & UMA_ZONE_VTOSLAB) { | ||||
slab = vtoslab((vm_offset_t)mem); | slab = vtoslab((vm_offset_t)mem); | ||||
} else { | } else { | ||||
/* | /* | ||||
* It is safe to return the slab here even though the | * It is safe to return the slab here even though the | ||||
* zone is unlocked because the item's allocation state | * zone is unlocked because the item's allocation state | ||||
* essentially holds a reference. | * essentially holds a reference. | ||||
*/ | */ | ||||
if (zone->uz_lockptr == &zone->uz_lock) | |||||
return (NULL); | |||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; | keg = zone->uz_keg; | ||||
if (keg->uk_flags & UMA_ZONE_HASH) | if (keg->uk_flags & UMA_ZONE_HASH) | ||||
slab = hash_sfind(&keg->uk_hash, mem); | slab = hash_sfind(&keg->uk_hash, mem); | ||||
else | else | ||||
slab = (uma_slab_t)(mem + keg->uk_pgoff); | slab = (uma_slab_t)(mem + keg->uk_pgoff); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
return (slab); | return (slab); | ||||
} | } | ||||
static bool | static bool | ||||
uma_dbg_zskip(uma_zone_t zone, void *mem) | uma_dbg_zskip(uma_zone_t zone, void *mem) | ||||
{ | { | ||||
uma_keg_t keg; | |||||
if ((keg = zone_first_keg(zone)) == NULL) | if (zone->uz_lockptr == &zone->uz_lock) | ||||
return (true); | return (true); | ||||
return (uma_dbg_kskip(keg, mem)); | return (uma_dbg_kskip(zone->uz_keg, mem)); | ||||
} | } | ||||
static bool | static bool | ||||
uma_dbg_kskip(uma_keg_t keg, void *mem) | uma_dbg_kskip(uma_keg_t keg, void *mem) | ||||
{ | { | ||||
uintptr_t idx; | uintptr_t idx; | ||||
if (dbg_divisor == 0) | if (dbg_divisor == 0) | ||||
▲ Show 20 Lines • Show All 142 Lines • Show Last 20 Lines |
This assertion should go at the beginning of the function.