Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/uma_core.c
Show First 20 Lines • Show All 213 Lines • ▼ Show 20 Lines | struct uma_bucket_zone bucket_zones[] = { | ||||
{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, | { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, | ||||
{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, | { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, | ||||
{ NULL, NULL, 0} | { NULL, NULL, 0} | ||||
}; | }; | ||||
/* | /* | ||||
* Flags and enumerations to be passed to internal functions. | * Flags and enumerations to be passed to internal functions. | ||||
*/ | */ | ||||
enum zfreeskip {
	SKIP_NONE =	0,		/* Perform every free-path stage. */
	SKIP_CNT =	0x00000001,	/* Flag bit: skip statistics update;
					   OR'd with a stage value (e.g.
					   SKIP_DTOR | SKIP_CNT). */
	SKIP_DTOR =	0x00010000,	/* Skip the item destructor stage. */
	SKIP_FINI =	0x00020000,	/* Skip destructor and finalizer. */
};
#define UMA_ANYDOMAIN -1 /* Special value for domain search. */ | #define UMA_ANYDOMAIN -1 /* Special value for domain search. */ | ||||
/* Prototypes.. */ | /* Prototypes.. */ | ||||
int uma_startup_count(int); | int uma_startup_count(int); | ||||
void uma_startup(void *, int); | void uma_startup(void *, int); | ||||
void uma_startup1(void); | void uma_startup1(void); | ||||
Show All 19 Lines | |||||
static void zone_foreach(void (*zfunc)(uma_zone_t)); | static void zone_foreach(void (*zfunc)(uma_zone_t)); | ||||
static void zone_timeout(uma_zone_t zone); | static void zone_timeout(uma_zone_t zone); | ||||
static int hash_alloc(struct uma_hash *); | static int hash_alloc(struct uma_hash *); | ||||
static int hash_expand(struct uma_hash *, struct uma_hash *); | static int hash_expand(struct uma_hash *, struct uma_hash *); | ||||
static void hash_free(struct uma_hash *hash); | static void hash_free(struct uma_hash *hash); | ||||
static void uma_timeout(void *); | static void uma_timeout(void *); | ||||
static void uma_startup3(void); | static void uma_startup3(void); | ||||
static void *zone_alloc_item(uma_zone_t, void *, int, int); | static void *zone_alloc_item(uma_zone_t, void *, int, int); | ||||
static void *zone_alloc_item_locked(uma_zone_t, void *, int, int); | |||||
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); | static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); | ||||
static void bucket_enable(void); | static void bucket_enable(void); | ||||
static void bucket_init(void); | static void bucket_init(void); | ||||
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); | static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); | ||||
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); | static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); | ||||
static void bucket_zone_drain(void); | static void bucket_zone_drain(void); | ||||
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); | static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int); | ||||
static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); | static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); | ||||
static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int); | |||||
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); | static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); | ||||
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); | static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item); | ||||
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, | static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, | ||||
uma_fini fini, int align, uint32_t flags); | uma_fini fini, int align, uint32_t flags); | ||||
static int zone_import(uma_zone_t, void **, int, int, int); | static int zone_import(uma_zone_t, void **, int, int, int); | ||||
static void zone_release(uma_zone_t, void **, int); | static void zone_release(uma_zone_t, void **, int); | ||||
static void uma_zero_item(void *, uma_zone_t); | static void uma_zero_item(void *, uma_zone_t); | ||||
void uma_print_zone(uma_zone_t); | void uma_print_zone(uma_zone_t); | ||||
void uma_print_stats(void); | void uma_print_stats(void); | ||||
▲ Show 20 Lines • Show All 190 Lines • ▼ Show 20 Lines | zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws) | ||||
ZONE_LOCK_ASSERT(zone); | ZONE_LOCK_ASSERT(zone); | ||||
if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { | if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { | ||||
MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | MPASS(zdom->uzd_nitems >= bucket->ub_cnt); | ||||
LIST_REMOVE(bucket, ub_link); | LIST_REMOVE(bucket, ub_link); | ||||
zdom->uzd_nitems -= bucket->ub_cnt; | zdom->uzd_nitems -= bucket->ub_cnt; | ||||
if (ws && zdom->uzd_imin > zdom->uzd_nitems) | if (ws && zdom->uzd_imin > zdom->uzd_nitems) | ||||
zdom->uzd_imin = zdom->uzd_nitems; | zdom->uzd_imin = zdom->uzd_nitems; | ||||
zone->uz_bkt_count -= bucket->ub_cnt; | |||||
} | } | ||||
return (bucket); | return (bucket); | ||||
} | } | ||||
/*
 * Insert a filled bucket onto a zone's per-domain cache and account for
 * its items.  The zone lock must be held, and the caller must have
 * verified there is room under uz_bkt_max (asserted below).
 *
 * Arguments:
 *	zone	The zone the bucket belongs to.
 *	zdom	Per-domain bucket cache to receive the bucket.
 *	bucket	Bucket being returned to the cache.
 *	ws	If true, update working-set (uzd_imax) tracking.
 */
static void
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
    const bool ws)
{

	ZONE_LOCK_ASSERT(zone);
	KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow",
	    __func__, zone));

	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
	zdom->uzd_nitems += bucket->ub_cnt;
	/* Raise the per-interval high watermark when tracking is requested. */
	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
		zdom->uzd_imax = zdom->uzd_nitems;
	/* Zone-wide cached-item count, bounded by uz_bkt_max. */
	zone->uz_bkt_count += bucket->ub_cnt;
}
static void | static void | ||||
zone_log_warning(uma_zone_t zone) | zone_log_warning(uma_zone_t zone) | ||||
{ | { | ||||
static const struct timeval warninterval = { 300, 0 }; | static const struct timeval warninterval = { 300, 0 }; | ||||
if (!zone_warnings || zone->uz_warning == NULL) | if (!zone_warnings || zone->uz_warning == NULL) | ||||
return; | return; | ||||
if (ratecheck(&zone->uz_ratecheck, &warninterval)) | if (ratecheck(&zone->uz_ratecheck, &warninterval)) | ||||
printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); | printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); | ||||
} | } | ||||
static inline void | static inline void | ||||
zone_maxaction(uma_zone_t zone) | zone_maxaction(uma_zone_t zone) | ||||
{ | { | ||||
if (zone->uz_maxaction.ta_func != NULL) | if (zone->uz_maxaction.ta_func != NULL) | ||||
taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); | taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); | ||||
} | } | ||||
/*
 * Invoke kegfn on every keg linked to this zone, in keg-list order.
 * NOTE(review): removed in the newer single-keg revision of this file,
 * where zones reference exactly one keg via uz_keg.
 */
static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}
/* | /* | ||||
* Routine called by timeout which is used to fire off some time interval | * Routine called by timeout which is used to fire off some time interval | ||||
* based calculations. (stats, hash size, etc.) | * based calculations. (stats, hash size, etc.) | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* arg Unused | * arg Unused | ||||
* | * | ||||
* Returns: | * Returns: | ||||
Show All 28 Lines | |||||
/* | /* | ||||
* Routine to perform timeout driven calculations. This expands the | * Routine to perform timeout driven calculations. This expands the | ||||
* hashes and does per cpu statistics aggregation. | * hashes and does per cpu statistics aggregation. | ||||
* | * | ||||
* Returns nothing. | * Returns nothing. | ||||
*/ | */ | ||||
static void | static void | ||||
keg_timeout(uma_keg_t keg) | zone_timeout(uma_zone_t zone) | ||||
{ | { | ||||
uma_keg_t keg = zone->uz_keg; | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
/* | /* | ||||
* Expand the keg hash table. | * Expand the keg hash table. | ||||
* | * | ||||
* This is done if the number of slabs is larger than the hash size. | * This is done if the number of slabs is larger than the hash size. | ||||
* What I'm trying to do here is completely reduce collisions. This | * What I'm trying to do here is completely reduce collisions. This | ||||
* may be a little aggressive. Should I allow for two collisions max? | * may be a little aggressive. Should I allow for two collisions max? | ||||
Show All 21 Lines | if (ret) { | ||||
} else | } else | ||||
oldhash = newhash; | oldhash = newhash; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
hash_free(&oldhash); | hash_free(&oldhash); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
KEG_UNLOCK(keg); | |||||
} | |||||
static void | for (int i = 0; i < vm_ndomains; i++) | ||||
zone_timeout(uma_zone_t zone) | |||||
{ | |||||
int i; | |||||
zone_foreach_keg(zone, &keg_timeout); | |||||
ZONE_LOCK(zone); | |||||
for (i = 0; i < vm_ndomains; i++) | |||||
zone_domain_update_wss(&zone->uz_domain[i]); | zone_domain_update_wss(&zone->uz_domain[i]); | ||||
ZONE_UNLOCK(zone); | |||||
KEG_UNLOCK(keg); | |||||
} | } | ||||
/* | /* | ||||
* Allocate and zero fill the next sized hash table from the appropriate | * Allocate and zero fill the next sized hash table from the appropriate | ||||
* backing store. | * backing store. | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* hash A new hash structure with the old hash size in uh_hashsize | * hash A new hash structure with the old hash size in uh_hashsize | ||||
▲ Show 20 Lines • Show All 113 Lines • ▼ Show 20 Lines | bucket_drain(uma_zone_t zone, uma_bucket_t bucket) | ||||
if (bucket == NULL) | if (bucket == NULL) | ||||
return; | return; | ||||
if (zone->uz_fini) | if (zone->uz_fini) | ||||
for (i = 0; i < bucket->ub_cnt; i++) | for (i = 0; i < bucket->ub_cnt; i++) | ||||
zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); | zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); | ||||
zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); | zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); | ||||
ZONE_LOCK(zone); | |||||
zone->uz_items -= bucket->ub_cnt; | |||||
if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items) | |||||
wakeup_one(zone); | |||||
ZONE_UNLOCK(zone); | |||||
bucket->ub_cnt = 0; | bucket->ub_cnt = 0; | ||||
} | } | ||||
/* | /* | ||||
* Drains the per cpu caches for a zone. | * Drains the per cpu caches for a zone. | ||||
* | * | ||||
* NOTE: This may only be called while the zone is being turn down, and not | * NOTE: This may only be called while the zone is being turn down, and not | ||||
* during normal operation. This is necessary in order that we do not have | * during normal operation. This is necessary in order that we do not have | ||||
▲ Show 20 Lines • Show All 269 Lines • ▼ Show 20 Lines | zone_drain_wait(uma_zone_t zone, int waitok) | ||||
zone->uz_flags |= UMA_ZFLAG_DRAINING; | zone->uz_flags |= UMA_ZFLAG_DRAINING; | ||||
bucket_cache_drain(zone); | bucket_cache_drain(zone); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
/* | /* | ||||
* The DRAINING flag protects us from being freed while | * The DRAINING flag protects us from being freed while | ||||
* we're running. Normally the uma_rwlock would protect us but we | * we're running. Normally the uma_rwlock would protect us but we | ||||
* must be able to release and acquire the right lock for each keg. | * must be able to release and acquire the right lock for each keg. | ||||
*/ | */ | ||||
zone_foreach_keg(zone, &keg_drain); | keg_drain(zone->uz_keg); | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
zone->uz_flags &= ~UMA_ZFLAG_DRAINING; | zone->uz_flags &= ~UMA_ZFLAG_DRAINING; | ||||
wakeup(zone); | wakeup(zone); | ||||
out: | out: | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
void | void | ||||
Show All 22 Lines | keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait) | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
unsigned long size; | unsigned long size; | ||||
uint8_t *mem; | uint8_t *mem; | ||||
uint8_t flags; | uint8_t flags; | ||||
int i; | int i; | ||||
KASSERT(domain >= 0 && domain < vm_ndomains, | KASSERT(domain >= 0 && domain < vm_ndomains, | ||||
("keg_alloc_slab: domain %d out of range", domain)); | ("keg_alloc_slab: domain %d out of range", domain)); | ||||
mtx_assert(&keg->uk_lock, MA_OWNED); | KEG_LOCK_ASSERT(keg); | ||||
MPASS(zone->uz_lockptr == &keg->uk_lock); | |||||
allocf = keg->uk_allocf; | allocf = keg->uk_allocf; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
slab = NULL; | slab = NULL; | ||||
mem = NULL; | mem = NULL; | ||||
if (keg->uk_flags & UMA_ZONE_OFFPAGE) { | if (keg->uk_flags & UMA_ZONE_OFFPAGE) { | ||||
slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); | slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | |||||
static void * | static void * | ||||
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, | startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, | ||||
int wait) | int wait) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
void *mem; | void *mem; | ||||
int pages; | int pages; | ||||
keg = zone_first_keg(zone); | keg = zone->uz_keg; | ||||
/* | /* | ||||
* If we are in BOOT_BUCKETS or higher, than switch to real | * If we are in BOOT_BUCKETS or higher, than switch to real | ||||
* allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC. | * allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC. | ||||
*/ | */ | ||||
switch (booted) { | switch (booted) { | ||||
case BOOT_COLD: | case BOOT_COLD: | ||||
case BOOT_STRAPPED: | case BOOT_STRAPPED: | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 121 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
TAILQ_HEAD(, vm_page) alloctail; | TAILQ_HEAD(, vm_page) alloctail; | ||||
u_long npages; | u_long npages; | ||||
vm_offset_t retkva, zkva; | vm_offset_t retkva, zkva; | ||||
vm_page_t p, p_next; | vm_page_t p, p_next; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
TAILQ_INIT(&alloctail); | TAILQ_INIT(&alloctail); | ||||
keg = zone_first_keg(zone); | keg = zone->uz_keg; | ||||
npages = howmany(bytes, PAGE_SIZE); | npages = howmany(bytes, PAGE_SIZE); | ||||
while (npages > 0) { | while (npages > 0) { | ||||
p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | | p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | | ||||
VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | | ||||
((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : | ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : | ||||
VM_ALLOC_NOWAIT)); | VM_ALLOC_NOWAIT)); | ||||
if (p != NULL) { | if (p != NULL) { | ||||
▲ Show 20 Lines • Show All 205 Lines • ▼ Show 20 Lines | |||||
* Returns | * Returns | ||||
* Nothing | * Nothing | ||||
*/ | */ | ||||
static void | static void | ||||
keg_large_init(uma_keg_t keg) | keg_large_init(uma_keg_t keg) | ||||
{ | { | ||||
KASSERT(keg != NULL, ("Keg is null in keg_large_init")); | KASSERT(keg != NULL, ("Keg is null in keg_large_init")); | ||||
KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, | |||||
("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); | |||||
KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, | KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, | ||||
("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); | ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); | ||||
keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); | keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); | ||||
keg->uk_ipers = 1; | keg->uk_ipers = 1; | ||||
keg->uk_rsize = keg->uk_size; | keg->uk_rsize = keg->uk_size; | ||||
/* Check whether we have enough space to not do OFFPAGE. */ | /* Check whether we have enough space to not do OFFPAGE. */ | ||||
▲ Show 20 Lines • Show All 204 Lines • ▼ Show 20 Lines | zone_ctor(void *mem, int size, void *udata, int flags) | ||||
zone->uz_init = NULL; | zone->uz_init = NULL; | ||||
zone->uz_fini = NULL; | zone->uz_fini = NULL; | ||||
zone->uz_allocs = 0; | zone->uz_allocs = 0; | ||||
zone->uz_frees = 0; | zone->uz_frees = 0; | ||||
zone->uz_fails = 0; | zone->uz_fails = 0; | ||||
zone->uz_sleeps = 0; | zone->uz_sleeps = 0; | ||||
zone->uz_count = 0; | zone->uz_count = 0; | ||||
zone->uz_count_min = 0; | zone->uz_count_min = 0; | ||||
zone->uz_count_max = BUCKET_MAX; | |||||
zone->uz_flags = 0; | zone->uz_flags = 0; | ||||
zone->uz_warning = NULL; | zone->uz_warning = NULL; | ||||
/* The domain structures follow the cpu structures. */ | /* The domain structures follow the cpu structures. */ | ||||
zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; | zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; | ||||
zone->uz_bkt_max = ULONG_MAX; | |||||
timevalclear(&zone->uz_ratecheck); | timevalclear(&zone->uz_ratecheck); | ||||
keg = arg->keg; | |||||
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); | |||||
/* | /* | ||||
* This is a pure cache zone, no kegs. | * This is a pure cache zone, no kegs. | ||||
*/ | */ | ||||
if (arg->import) { | if (arg->import) { | ||||
if (arg->flags & UMA_ZONE_VM) | if (arg->flags & UMA_ZONE_VM) | ||||
arg->flags |= UMA_ZFLAG_CACHEONLY; | arg->flags |= UMA_ZFLAG_CACHEONLY; | ||||
zone->uz_flags = arg->flags; | zone->uz_flags = arg->flags; | ||||
zone->uz_size = arg->size; | zone->uz_size = arg->size; | ||||
zone->uz_import = arg->import; | zone->uz_import = arg->import; | ||||
zone->uz_release = arg->release; | zone->uz_release = arg->release; | ||||
zone->uz_arg = arg->arg; | zone->uz_arg = arg->arg; | ||||
zone->uz_lockptr = &zone->uz_lock; | zone->uz_lockptr = &zone->uz_lock; | ||||
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); | |||||
rw_wlock(&uma_rwlock); | rw_wlock(&uma_rwlock); | ||||
LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); | LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); | ||||
rw_wunlock(&uma_rwlock); | rw_wunlock(&uma_rwlock); | ||||
goto out; | goto out; | ||||
} | } | ||||
/* | /* | ||||
* Use the regular zone/keg/slab allocator. | * Use the regular zone/keg/slab allocator. | ||||
*/ | */ | ||||
zone->uz_import = (uma_import)zone_import; | zone->uz_import = (uma_import)zone_import; | ||||
zone->uz_release = (uma_release)zone_release; | zone->uz_release = (uma_release)zone_release; | ||||
zone->uz_arg = zone; | zone->uz_arg = zone; | ||||
keg = arg->keg; | |||||
if (arg->flags & UMA_ZONE_SECONDARY) { | if (arg->flags & UMA_ZONE_SECONDARY) { | ||||
KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); | KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); | ||||
zone->uz_init = arg->uminit; | zone->uz_init = arg->uminit; | ||||
zone->uz_fini = arg->fini; | zone->uz_fini = arg->fini; | ||||
zone->uz_lockptr = &keg->uk_lock; | zone->uz_lockptr = &keg->uk_lock; | ||||
zone->uz_flags |= UMA_ZONE_SECONDARY; | zone->uz_flags |= UMA_ZONE_SECONDARY; | ||||
rw_wlock(&uma_rwlock); | rw_wlock(&uma_rwlock); | ||||
Show All 22 Lines | if (arg->flags & UMA_ZONE_SECONDARY) { | ||||
karg.flags = arg->flags; | karg.flags = arg->flags; | ||||
karg.zone = zone; | karg.zone = zone; | ||||
error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, | error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, | ||||
flags); | flags); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | zone->uz_keg = keg; | ||||
* Link in the first keg. | |||||
*/ | |||||
zone->uz_klink.kl_keg = keg; | |||||
LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); | |||||
zone->uz_lockptr = &keg->uk_lock; | |||||
zone->uz_size = keg->uk_size; | zone->uz_size = keg->uk_size; | ||||
zone->uz_flags |= (keg->uk_flags & | zone->uz_flags |= (keg->uk_flags & | ||||
(UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); | (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); | ||||
/* | /* | ||||
* Some internal zones don't have room allocated for the per cpu | * Some internal zones don't have room allocated for the per cpu | ||||
* caches. If we're internal, bail out here. | * caches. If we're internal, bail out here. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
* Zone header dtor. | * Zone header dtor. | ||||
* | * | ||||
* Arguments/Returns follow uma_dtor specifications | * Arguments/Returns follow uma_dtor specifications | ||||
* udata unused | * udata unused | ||||
*/ | */ | ||||
/*
 * Zone header dtor.
 *
 * Tears down a zone: drains the per-CPU caches, removes the zone from
 * the global list, drains the bucket cache and keg, destroys the backing
 * keg when this zone owns it, and finally releases the zone lock.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;

	/* Internal zones never populated per-CPU caches; skip the drain. */
	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	rw_wlock(&uma_rwlock);
	LIST_REMOVE(zone, uz_link);
	rw_wunlock(&uma_rwlock);
	/*
	 * XXX there are some races here where
	 * the zone can be drained but zone lock
	 * released and then refilled before we
	 * remove it... we dont care for now
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * We only destroy kegs from non secondary zones.
	 * Secondary zones share their keg with a master zone, which
	 * remains responsible for freeing it.  Cache zones have no keg
	 * (uz_keg == NULL) and are skipped by the NULL check.
	 */
	if ((keg = zone->uz_keg) != NULL &&
	    (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		rw_wlock(&uma_rwlock);
		LIST_REMOVE(keg, uk_link);
		rw_wunlock(&uma_rwlock);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}
/* | /* | ||||
* Traverses every zone in the system and calls a callback | * Traverses every zone in the system and calls a callback | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* zfunc A pointer to a function which accepts a zone | * zfunc A pointer to a function which accepts a zone | ||||
* as an argument. | * as an argument. | ||||
▲ Show 20 Lines • Show All 274 Lines • ▼ Show 20 Lines | |||||
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, | uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, | ||||
uma_init zinit, uma_fini zfini, uma_zone_t master) | uma_init zinit, uma_fini zfini, uma_zone_t master) | ||||
{ | { | ||||
struct uma_zctor_args args; | struct uma_zctor_args args; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
uma_zone_t res; | uma_zone_t res; | ||||
bool locked; | bool locked; | ||||
keg = zone_first_keg(master); | keg = master->uz_keg; | ||||
memset(&args, 0, sizeof(args)); | memset(&args, 0, sizeof(args)); | ||||
args.name = name; | args.name = name; | ||||
args.size = keg->uk_size; | args.size = keg->uk_size; | ||||
args.ctor = ctor; | args.ctor = ctor; | ||||
args.dtor = dtor; | args.dtor = dtor; | ||||
args.uminit = zinit; | args.uminit = zinit; | ||||
args.fini = zfini; | args.fini = zfini; | ||||
args.align = keg->uk_align; | args.align = keg->uk_align; | ||||
Show All 27 Lines | uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, | ||||
args.ctor = ctor; | args.ctor = ctor; | ||||
args.dtor = dtor; | args.dtor = dtor; | ||||
args.uminit = zinit; | args.uminit = zinit; | ||||
args.fini = zfini; | args.fini = zfini; | ||||
args.import = zimport; | args.import = zimport; | ||||
args.release = zrelease; | args.release = zrelease; | ||||
args.arg = arg; | args.arg = arg; | ||||
args.align = 0; | args.align = 0; | ||||
args.flags = flags; | args.flags = flags | UMA_ZFLAG_CACHE; | ||||
return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); | return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); | ||||
} | } | ||||
/*
 * Acquire the locks of two zones in a canonical order (lowest zone
 * address first) so concurrent callers locking the same pair cannot
 * deadlock.  MTX_DUPOK is needed on the second acquisition because both
 * zone mutexes belong to the same lock class.
 */
static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
	}
}
/*
 * Release both zone locks taken by zone_lock_pair().  Release order is
 * irrelevant for deadlock avoidance.
 */
static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{
	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}
/*
 * Attach an additional master's keg to a secondary multi-keg zone so the
 * zone may be backed by more than one keg.
 *
 * Returns:
 *	0 on success, EINVAL if either zone lacks the required VTOSLAB /
 *	SECONDARY flags, or E2BIG if the item sizes differ.
 *
 * NOTE(review): removed in the newer single-keg revision of this file.
 */
int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	/* Allocate before locking: M_WAITOK may sleep. */
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 * NOTE(review): this condition tests zone->uz_flags again rather
	 * than master->uz_flags, contradicting the comment above —
	 * presumably a bug; confirm against upstream before relying on it.
	 */
	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}

	/*
	 * Put it at the end of the list.
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	/* Ownership of klink has passed to the zone's keg list. */
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	/* Switch slab fetching to the multi-keg strategy. */
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	/* Free the unused link on any error path. */
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zdestroy(uma_zone_t zone) | uma_zdestroy(uma_zone_t zone) | ||||
{ | { | ||||
sx_slock(&uma_drain_lock); | sx_slock(&uma_drain_lock); | ||||
zone_free_item(zones, zone, NULL, SKIP_NONE); | zone_free_item(zones, zone, NULL, SKIP_NONE); | ||||
sx_sunlock(&uma_drain_lock); | sx_sunlock(&uma_drain_lock); | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | |||||
/* See uma.h */ | /* See uma.h */ | ||||
void * | void * | ||||
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) | uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
void *item; | void *item; | ||||
int cpu, domain, lockfail; | int cpu, domain, lockfail, maxbucket; | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
bool skipdbg; | bool skipdbg; | ||||
#endif | #endif | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
/* This is the fast path allocation */ | /* This is the fast path allocation */ | ||||
▲ Show 20 Lines • Show All 61 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
if (zone->uz_ctor != NULL && | if (zone->uz_ctor != NULL && | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
(!skipdbg || zone->uz_ctor != trash_ctor || | (!skipdbg || zone->uz_ctor != trash_ctor || | ||||
zone->uz_dtor != trash_dtor) && | zone->uz_dtor != trash_dtor) && | ||||
#endif | #endif | ||||
zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { | zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { | ||||
atomic_add_long(&zone->uz_fails, 1); | atomic_add_long(&zone->uz_fails, 1); | ||||
zone_free_item(zone, item, udata, SKIP_DTOR); | zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (!skipdbg) | if (!skipdbg) | ||||
uma_dbg_alloc(zone, NULL, item); | uma_dbg_alloc(zone, NULL, item); | ||||
#endif | #endif | ||||
if (flags & M_ZERO) | if (flags & M_ZERO) | ||||
uma_zero_item(item, zone); | uma_zero_item(item, zone); | ||||
Show All 26 Lines | #endif | ||||
if (zone->uz_flags & UMA_ZONE_NUMA) { | if (zone->uz_flags & UMA_ZONE_NUMA) { | ||||
domain = PCPU_GET(domain); | domain = PCPU_GET(domain); | ||||
if (VM_DOMAIN_EMPTY(domain)) | if (VM_DOMAIN_EMPTY(domain)) | ||||
domain = UMA_ANYDOMAIN; | domain = UMA_ANYDOMAIN; | ||||
} else | } else | ||||
domain = UMA_ANYDOMAIN; | domain = UMA_ANYDOMAIN; | ||||
/* Short-circuit for zones without buckets and low memory. */ | /* Short-circuit for zones without buckets and low memory. */ | ||||
if (zone->uz_count == 0 || bucketdisable) | if (zone->uz_count == 0 || bucketdisable) { | ||||
ZONE_LOCK(zone); | |||||
goto zalloc_item; | goto zalloc_item; | ||||
} | |||||
/* | /* | ||||
* Attempt to retrieve the item from the per-CPU cache has failed, so | * Attempt to retrieve the item from the per-CPU cache has failed, so | ||||
* we must go back to the zone. This requires the zone lock, so we | * we must go back to the zone. This requires the zone lock, so we | ||||
* must drop the critical section, then re-acquire it when we go back | * must drop the critical section, then re-acquire it when we go back | ||||
* to the cache. Since the critical section is released, we may be | * to the cache. Since the critical section is released, we may be | ||||
* preempted or migrate. As such, make sure not to maintain any | * preempted or migrate. As such, make sure not to maintain any | ||||
* thread-local state specific to the cache from prior to releasing | * thread-local state specific to the cache from prior to releasing | ||||
Show All 31 Lines | #endif | ||||
} | } | ||||
/* We are no longer associated with this CPU. */ | /* We are no longer associated with this CPU. */ | ||||
critical_exit(); | critical_exit(); | ||||
/* | /* | ||||
* We bump the uz count when the cache size is insufficient to | * We bump the uz count when the cache size is insufficient to | ||||
* handle the working set. | * handle the working set. | ||||
*/ | */ | ||||
if (lockfail && zone->uz_count < BUCKET_MAX) | if (lockfail && zone->uz_count < zone->uz_count_max) | ||||
zone->uz_count++; | zone->uz_count++; | ||||
if (zone->uz_max_items > 0) { | |||||
if (zone->uz_items >= zone->uz_max_items) | |||||
goto zalloc_item; | |||||
maxbucket = MIN(zone->uz_count, | |||||
zone->uz_max_items - zone->uz_items); | |||||
} else | |||||
maxbucket = zone->uz_count; | |||||
zone->uz_items += maxbucket; | |||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
/* | /* | ||||
* Now lets just fill a bucket and put it on the free list. If that | * Now lets just fill a bucket and put it on the free list. If that | ||||
* works we'll restart the allocation from the beginning and it | * works we'll restart the allocation from the beginning and it | ||||
* will use the just filled bucket. | * will use the just filled bucket. | ||||
*/ | */ | ||||
bucket = zone_alloc_bucket(zone, udata, domain, flags); | bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket); | ||||
CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", | CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
if (bucket != NULL) { | |||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
if (bucket != NULL) { | |||||
if (bucket->ub_cnt < maxbucket) { | |||||
MPASS(zone->uz_flags & UMA_ZFLAG_CACHE || | |||||
zone->uz_items >= maxbucket - bucket->ub_cnt); | |||||
zone->uz_items -= maxbucket - bucket->ub_cnt; | |||||
if (zone->uz_sleepers > 0 && | |||||
zone->uz_items < zone->uz_max_items) | |||||
wakeup_one(zone); | |||||
} | |||||
critical_enter(); | critical_enter(); | ||||
cpu = curcpu; | cpu = curcpu; | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
/* | /* | ||||
* See if we lost the race or were migrated. Cache the | * See if we lost the race or were migrated. Cache the | ||||
* initialized bucket to make this less likely or claim | * initialized bucket to make this less likely or claim | ||||
* the memory directly. | * the memory directly. | ||||
*/ | */ | ||||
if (cache->uc_allocbucket == NULL && | if (cache->uc_allocbucket == NULL && | ||||
((zone->uz_flags & UMA_ZONE_NUMA) == 0 || | ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || | ||||
domain == PCPU_GET(domain))) { | domain == PCPU_GET(domain))) { | ||||
cache->uc_allocbucket = bucket; | cache->uc_allocbucket = bucket; | ||||
zdom->uzd_imax += bucket->ub_cnt; | zdom->uzd_imax += bucket->ub_cnt; | ||||
} else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { | } else if (zone->uz_bkt_count >= zone->uz_bkt_max) { | ||||
critical_exit(); | critical_exit(); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
bucket_drain(zone, bucket); | bucket_drain(zone, bucket); | ||||
bucket_free(zone, bucket, udata); | bucket_free(zone, bucket, udata); | ||||
goto zalloc_restart; | goto zalloc_restart; | ||||
} else | } else | ||||
zone_put_bucket(zone, zdom, bucket, false); | zone_put_bucket(zone, zdom, bucket, false); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
goto zalloc_start; | goto zalloc_start; | ||||
} else { | |||||
zone->uz_items -= maxbucket; | |||||
if (zone->uz_sleepers > 0 && | |||||
zone->uz_items + 1 < zone->uz_max_items) | |||||
wakeup_one(zone); | |||||
} | } | ||||
/* | /* | ||||
* We may not be able to get a bucket so return an actual item. | * We may not be able to get a bucket so return an actual item. | ||||
*/ | */ | ||||
zalloc_item: | zalloc_item: | ||||
item = zone_alloc_item(zone, udata, domain, flags); | item = zone_alloc_item_locked(zone, udata, domain, flags); | ||||
return (item); | return (item); | ||||
} | } | ||||
void * | void * | ||||
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) | uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) | ||||
{ | { | ||||
Show All 26 Lines | |||||
keg_first_slab(uma_keg_t keg, int domain, bool rr) | keg_first_slab(uma_keg_t keg, int domain, bool rr) | ||||
{ | { | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
int start; | int start; | ||||
KASSERT(domain >= 0 && domain < vm_ndomains, | KASSERT(domain >= 0 && domain < vm_ndomains, | ||||
("keg_first_slab: domain %d out of range", domain)); | ("keg_first_slab: domain %d out of range", domain)); | ||||
KEG_LOCK_ASSERT(keg); | |||||
slab = NULL; | slab = NULL; | ||||
start = domain; | start = domain; | ||||
do { | do { | ||||
dom = &keg->uk_domain[domain]; | dom = &keg->uk_domain[domain]; | ||||
if (!LIST_EMPTY(&dom->ud_part_slab)) | if (!LIST_EMPTY(&dom->ud_part_slab)) | ||||
return (LIST_FIRST(&dom->ud_part_slab)); | return (LIST_FIRST(&dom->ud_part_slab)); | ||||
if (!LIST_EMPTY(&dom->ud_free_slab)) { | if (!LIST_EMPTY(&dom->ud_free_slab)) { | ||||
Show All 9 Lines | keg_first_slab(uma_keg_t keg, int domain, bool rr) | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
static uma_slab_t | static uma_slab_t | ||||
keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) | keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) | ||||
{ | { | ||||
uint32_t reserve; | uint32_t reserve; | ||||
mtx_assert(&keg->uk_lock, MA_OWNED); | KEG_LOCK_ASSERT(keg); | ||||
reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; | reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; | ||||
if (keg->uk_free <= reserve) | if (keg->uk_free <= reserve) | ||||
return (NULL); | return (NULL); | ||||
return (keg_first_slab(keg, domain, rr)); | return (keg_first_slab(keg, domain, rr)); | ||||
} | } | ||||
static uma_slab_t | static uma_slab_t | ||||
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) | keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) | ||||
{ | { | ||||
struct vm_domainset_iter di; | struct vm_domainset_iter di; | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
int aflags, domain; | int aflags, domain; | ||||
bool rr; | bool rr; | ||||
restart: | restart: | ||||
mtx_assert(&keg->uk_lock, MA_OWNED); | KEG_LOCK_ASSERT(keg); | ||||
/* | /* | ||||
* Use the keg's policy if upper layers haven't already specified a | * Use the keg's policy if upper layers haven't already specified a | ||||
* domain (as happens with first-touch zones). | * domain (as happens with first-touch zones). | ||||
* | * | ||||
* To avoid races we run the iterator with the keg lock held, but that | * To avoid races we run the iterator with the keg lock held, but that | ||||
* means that we cannot allow the vm_domainset layer to sleep. Thus, | * means that we cannot allow the vm_domainset layer to sleep. Thus, | ||||
* clear M_WAITOK and handle low memory conditions locally. | * clear M_WAITOK and handle low memory conditions locally. | ||||
Show All 16 Lines | for (;;) { | ||||
} | } | ||||
/* | /* | ||||
* M_NOVM means don't ask at all! | * M_NOVM means don't ask at all! | ||||
*/ | */ | ||||
if (flags & M_NOVM) | if (flags & M_NOVM) | ||||
break; | break; | ||||
if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { | KASSERT(zone->uz_max_items == 0 || | ||||
keg->uk_flags |= UMA_ZFLAG_FULL; | zone->uz_items <= zone->uz_max_items, | ||||
/* | ("%s: zone %p overflow", __func__, zone)); | ||||
* If this is not a multi-zone, set the FULL bit. | |||||
* Otherwise slab_multi() takes care of it. | |||||
*/ | |||||
if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { | |||||
zone->uz_flags |= UMA_ZFLAG_FULL; | |||||
zone_log_warning(zone); | |||||
zone_maxaction(zone); | |||||
} | |||||
if (flags & M_NOWAIT) | |||||
return (NULL); | |||||
zone->uz_sleeps++; | |||||
msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); | |||||
continue; | |||||
} | |||||
slab = keg_alloc_slab(keg, zone, domain, aflags); | slab = keg_alloc_slab(keg, zone, domain, aflags); | ||||
/* | /* | ||||
* If we got a slab here it's safe to mark it partially used | * If we got a slab here it's safe to mark it partially used | ||||
* and return. We assume that the caller is going to remove | * and return. We assume that the caller is going to remove | ||||
* at least one item. | * at least one item. | ||||
*/ | */ | ||||
if (slab) { | if (slab) { | ||||
MPASS(slab->us_keg == keg); | MPASS(slab->us_keg == keg); | ||||
Show All 26 Lines | |||||
} | } | ||||
static uma_slab_t | static uma_slab_t | ||||
zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) | zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) | ||||
{ | { | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
if (keg == NULL) { | if (keg == NULL) { | ||||
keg = zone_first_keg(zone); | keg = zone->uz_keg; | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
} | } | ||||
for (;;) { | for (;;) { | ||||
slab = keg_fetch_slab(keg, zone, domain, flags); | slab = keg_fetch_slab(keg, zone, domain, flags); | ||||
if (slab) | if (slab) | ||||
return (slab); | return (slab); | ||||
if (flags & (M_NOWAIT | M_NOVM)) | if (flags & (M_NOWAIT | M_NOVM)) | ||||
break; | break; | ||||
} | } | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
/* | |||||
* uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns | |||||
* with the keg locked. On NULL no lock is held. | |||||
* | |||||
* The last pointer is used to seed the search. It is not required. | |||||
*/ | |||||
static uma_slab_t | |||||
zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) | |||||
{ | |||||
uma_klink_t klink; | |||||
uma_slab_t slab; | |||||
uma_keg_t keg; | |||||
int flags; | |||||
int empty; | |||||
int full; | |||||
/* | |||||
* Don't wait on the first pass. This will skip limit tests | |||||
* as well. We don't want to block if we can find a provider | |||||
* without blocking. | |||||
*/ | |||||
flags = (rflags & ~M_WAITOK) | M_NOWAIT; | |||||
/* | |||||
* Use the last slab allocated as a hint for where to start | |||||
* the search. | |||||
*/ | |||||
if (last != NULL) { | |||||
slab = keg_fetch_slab(last, zone, domain, flags); | |||||
if (slab) | |||||
return (slab); | |||||
KEG_UNLOCK(last); | |||||
} | |||||
/* | |||||
* Loop until we have a slab incase of transient failures | |||||
* while M_WAITOK is specified. I'm not sure this is 100% | |||||
* required but we've done it for so long now. | |||||
*/ | |||||
for (;;) { | |||||
empty = 0; | |||||
full = 0; | |||||
/* | |||||
* Search the available kegs for slabs. Be careful to hold the | |||||
* correct lock while calling into the keg layer. | |||||
*/ | |||||
LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { | |||||
keg = klink->kl_keg; | |||||
KEG_LOCK(keg); | |||||
if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { | |||||
slab = keg_fetch_slab(keg, zone, domain, flags); | |||||
if (slab) | |||||
return (slab); | |||||
} | |||||
if (keg->uk_flags & UMA_ZFLAG_FULL) | |||||
full++; | |||||
else | |||||
empty++; | |||||
KEG_UNLOCK(keg); | |||||
} | |||||
if (rflags & (M_NOWAIT | M_NOVM)) | |||||
break; | |||||
flags = rflags; | |||||
/* | |||||
* All kegs are full. XXX We can't atomically check all kegs | |||||
* and sleep so just sleep for a short period and retry. | |||||
*/ | |||||
if (full && !empty) { | |||||
ZONE_LOCK(zone); | |||||
zone->uz_flags |= UMA_ZFLAG_FULL; | |||||
zone->uz_sleeps++; | |||||
zone_log_warning(zone); | |||||
zone_maxaction(zone); | |||||
msleep(zone, zone->uz_lockptr, PVM, | |||||
"zonelimit", hz/100); | |||||
zone->uz_flags &= ~UMA_ZFLAG_FULL; | |||||
ZONE_UNLOCK(zone); | |||||
continue; | |||||
} | |||||
} | |||||
return (NULL); | |||||
} | |||||
static void * | static void * | ||||
slab_alloc_item(uma_keg_t keg, uma_slab_t slab) | slab_alloc_item(uma_keg_t keg, uma_slab_t slab) | ||||
{ | { | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
void *item; | void *item; | ||||
uint8_t freei; | uint8_t freei; | ||||
MPASS(keg == slab->us_keg); | MPASS(keg == slab->us_keg); | ||||
mtx_assert(&keg->uk_lock, MA_OWNED); | KEG_LOCK_ASSERT(keg); | ||||
freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; | freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; | ||||
BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); | BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); | ||||
item = slab->us_data + (keg->uk_rsize * freei); | item = slab->us_data + (keg->uk_rsize * freei); | ||||
slab->us_freecount--; | slab->us_freecount--; | ||||
keg->uk_free--; | keg->uk_free--; | ||||
/* Move this slab to the full list */ | /* Move this slab to the full list */ | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | #endif | ||||
} | } | ||||
if (slab != NULL) | if (slab != NULL) | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
return i; | return i; | ||||
} | } | ||||
static uma_bucket_t | static uma_bucket_t | ||||
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) | zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max) | ||||
{ | { | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
int max; | |||||
CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain); | CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain); | ||||
/* Don't wait for buckets, preserve caller's NOVM setting. */ | /* Don't wait for buckets, preserve caller's NOVM setting. */ | ||||
bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); | bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); | ||||
if (bucket == NULL) | if (bucket == NULL) | ||||
return (NULL); | return (NULL); | ||||
max = MIN(bucket->ub_entries, zone->uz_count); | |||||
bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, | bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, | ||||
max, domain, flags); | max, domain, flags); | ||||
/* | /* | ||||
* Initialize the memory if necessary. | * Initialize the memory if necessary. | ||||
*/ | */ | ||||
if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { | if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { | ||||
int i; | int i; | ||||
Show All 38 Lines | |||||
* Returns | * Returns | ||||
* NULL if there is no memory and M_NOWAIT is set | * NULL if there is no memory and M_NOWAIT is set | ||||
* An item if successful | * An item if successful | ||||
*/ | */ | ||||
static void * | static void * | ||||
zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) | zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) | ||||
{ | { | ||||
ZONE_LOCK(zone); | |||||
return (zone_alloc_item_locked(zone, udata, domain, flags)); | |||||
} | |||||
/* | |||||
* Returns with zone unlocked. | |||||
*/ | |||||
static void * | |||||
zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags) | |||||
{ | |||||
void *item; | void *item; | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
bool skipdbg; | bool skipdbg; | ||||
#endif | #endif | ||||
item = NULL; | ZONE_LOCK_ASSERT(zone); | ||||
if (zone->uz_max_items > 0 && zone->uz_items >= zone->uz_max_items) { | |||||
zone_log_warning(zone); | |||||
zone_maxaction(zone); | |||||
if (flags & M_NOWAIT) { | |||||
ZONE_UNLOCK(zone); | |||||
return (NULL); | |||||
} | |||||
zone->uz_sleeps++; | |||||
zone->uz_sleepers++; | |||||
while (zone->uz_items >= zone->uz_max_items) | |||||
mtx_sleep(zone, zone->uz_lockptr, PVM, "zonelimit", 0); | |||||
zone->uz_sleepers--; | |||||
if (zone->uz_sleepers > 0 && | |||||
zone->uz_items + 1 < zone->uz_max_items) | |||||
wakeup_one(zone); | |||||
} | |||||
zone->uz_items++; | |||||
zone->uz_allocs++; | |||||
ZONE_UNLOCK(zone); | |||||
if (domain != UMA_ANYDOMAIN) { | if (domain != UMA_ANYDOMAIN) { | ||||
/* avoid allocs targeting empty domains */ | /* avoid allocs targeting empty domains */ | ||||
if (VM_DOMAIN_EMPTY(domain)) | if (VM_DOMAIN_EMPTY(domain)) | ||||
domain = UMA_ANYDOMAIN; | domain = UMA_ANYDOMAIN; | ||||
} | } | ||||
if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) | if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) | ||||
goto fail; | goto fail; | ||||
atomic_add_long(&zone->uz_allocs, 1); | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
skipdbg = uma_dbg_zskip(zone, item); | skipdbg = uma_dbg_zskip(zone, item); | ||||
#endif | #endif | ||||
/* | /* | ||||
* We have to call both the zone's init (not the keg's init) | * We have to call both the zone's init (not the keg's init) | ||||
* and the zone's ctor. This is because the item is going from | * and the zone's ctor. This is because the item is going from | ||||
* a keg slab directly to the user, and the user is expecting it | * a keg slab directly to the user, and the user is expecting it | ||||
* to be both zone-init'd as well as zone-ctor'd. | * to be both zone-init'd as well as zone-ctor'd. | ||||
*/ | */ | ||||
if (zone->uz_init != NULL) { | if (zone->uz_init != NULL) { | ||||
if (zone->uz_init(item, zone->uz_size, flags) != 0) { | if (zone->uz_init(item, zone->uz_size, flags) != 0) { | ||||
zone_free_item(zone, item, udata, SKIP_FINI); | zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
} | } | ||||
if (zone->uz_ctor != NULL && | if (zone->uz_ctor != NULL && | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
(!skipdbg || zone->uz_ctor != trash_ctor || | (!skipdbg || zone->uz_ctor != trash_ctor || | ||||
zone->uz_dtor != trash_dtor) && | zone->uz_dtor != trash_dtor) && | ||||
#endif | #endif | ||||
zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { | zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { | ||||
zone_free_item(zone, item, udata, SKIP_DTOR); | zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (!skipdbg) | if (!skipdbg) | ||||
uma_dbg_alloc(zone, NULL, item); | uma_dbg_alloc(zone, NULL, item); | ||||
#endif | #endif | ||||
if (flags & M_ZERO) | if (flags & M_ZERO) | ||||
uma_zero_item(item, zone); | uma_zero_item(item, zone); | ||||
CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, | CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, | ||||
zone->uz_name, zone); | zone->uz_name, zone); | ||||
return (item); | return (item); | ||||
fail: | fail: | ||||
ZONE_LOCK(zone); | |||||
zone->uz_items--; | |||||
zone->uz_allocs--; | |||||
ZONE_UNLOCK(zone); | |||||
atomic_add_long(&zone->uz_fails, 1); | |||||
CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", | CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", | ||||
zone->uz_name, zone); | zone->uz_name, zone); | ||||
atomic_add_long(&zone->uz_fails, 1); | |||||
return (NULL); | return (NULL); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zfree_arg(uma_zone_t zone, void *item, void *udata) | uma_zfree_arg(uma_zone_t zone, void *item, void *udata) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_bucket_t bucket; | uma_bucket_t bucket; | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
int cpu, domain, lockfail; | int cpu, domain; | ||||
bool lockfail; | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
bool skipdbg; | bool skipdbg; | ||||
#endif | #endif | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, | CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, | ||||
Show All 29 Lines | #else | ||||
if (zone->uz_dtor != NULL) | if (zone->uz_dtor != NULL) | ||||
#endif | #endif | ||||
zone->uz_dtor(item, zone->uz_size, udata); | zone->uz_dtor(item, zone->uz_size, udata); | ||||
/* | /* | ||||
* The race here is acceptable. If we miss it we'll just have to wait | * The race here is acceptable. If we miss it we'll just have to wait | ||||
* a little longer for the limits to be reset. | * a little longer for the limits to be reset. | ||||
*/ | */ | ||||
if (zone->uz_flags & UMA_ZFLAG_FULL) | if (zone->uz_sleepers > 0) | ||||
goto zfree_item; | goto zfree_item; | ||||
/* | /* | ||||
* If possible, free to the per-CPU cache. There are two | * If possible, free to the per-CPU cache. There are two | ||||
* requirements for safe access to the per-CPU cache: (1) the thread | * requirements for safe access to the per-CPU cache: (1) the thread | ||||
* accessing the cache must not be preempted or yield during access, | * accessing the cache must not be preempted or yield during access, | ||||
* and (2) the thread must not migrate CPUs without switching which | * and (2) the thread must not migrate CPUs without switching which | ||||
* cache it accesses. We rely on a critical section to prevent | * cache it accesses. We rely on a critical section to prevent | ||||
Show All 33 Lines | zfree_start: | ||||
* preempted or migrate. As such, make sure not to maintain any | * preempted or migrate. As such, make sure not to maintain any | ||||
* thread-local state specific to the cache from prior to releasing | * thread-local state specific to the cache from prior to releasing | ||||
* the critical section. | * the critical section. | ||||
*/ | */ | ||||
critical_exit(); | critical_exit(); | ||||
if (zone->uz_count == 0 || bucketdisable) | if (zone->uz_count == 0 || bucketdisable) | ||||
goto zfree_item; | goto zfree_item; | ||||
lockfail = 0; | lockfail = false; | ||||
if (ZONE_TRYLOCK(zone) == 0) { | if (ZONE_TRYLOCK(zone) == 0) { | ||||
/* Record contention to size the buckets. */ | /* Record contention to size the buckets. */ | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
lockfail = 1; | lockfail = true; | ||||
} | } | ||||
critical_enter(); | critical_enter(); | ||||
cpu = curcpu; | cpu = curcpu; | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
bucket = cache->uc_freebucket; | bucket = cache->uc_freebucket; | ||||
if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { | if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
Show All 12 Lines | zfree_start: | ||||
zdom = &zone->uz_domain[0]; | zdom = &zone->uz_domain[0]; | ||||
/* Can we throw this on the zone full list? */ | /* Can we throw this on the zone full list? */ | ||||
if (bucket != NULL) { | if (bucket != NULL) { | ||||
CTR3(KTR_UMA, | CTR3(KTR_UMA, | ||||
"uma_zfree: zone %s(%p) putting bucket %p on free list", | "uma_zfree: zone %s(%p) putting bucket %p on free list", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
/* ub_cnt is pointing to the last free item */ | /* ub_cnt is pointing to the last free item */ | ||||
KASSERT(bucket->ub_cnt != 0, | KASSERT(bucket->ub_cnt == bucket->ub_entries, | ||||
("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); | ("uma_zfree: Attempting to insert not full bucket onto the full list.\n")); | ||||
if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { | if (zone->uz_bkt_count >= zone->uz_bkt_max) { | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
bucket_drain(zone, bucket); | bucket_drain(zone, bucket); | ||||
bucket_free(zone, bucket, udata); | bucket_free(zone, bucket, udata); | ||||
goto zfree_restart; | goto zfree_restart; | ||||
} else | } else | ||||
zone_put_bucket(zone, zdom, bucket, true); | zone_put_bucket(zone, zdom, bucket, true); | ||||
} | } | ||||
/* | /* | ||||
* We bump the uz count when the cache size is insufficient to | * We bump the uz count when the cache size is insufficient to | ||||
* handle the working set. | * handle the working set. | ||||
*/ | */ | ||||
if (lockfail && zone->uz_count < BUCKET_MAX) | if (lockfail && zone->uz_count < zone->uz_count_max) | ||||
zone->uz_count++; | zone->uz_count++; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
bucket = bucket_alloc(zone, udata, M_NOWAIT); | bucket = bucket_alloc(zone, udata, M_NOWAIT); | ||||
CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", | CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", | ||||
zone->uz_name, zone, bucket); | zone->uz_name, zone, bucket); | ||||
if (bucket) { | if (bucket) { | ||||
critical_enter(); | critical_enter(); | ||||
Show All 14 Lines | if (bucket) { | ||||
goto zfree_restart; | goto zfree_restart; | ||||
} | } | ||||
/* | /* | ||||
* If nothing else caught this, we'll just do an internal free. | * If nothing else caught this, we'll just do an internal free. | ||||
*/ | */ | ||||
zfree_item: | zfree_item: | ||||
zone_free_item(zone, item, udata, SKIP_DTOR); | zone_free_item(zone, item, udata, SKIP_DTOR); | ||||
return; | |||||
} | } | ||||
void | void | ||||
uma_zfree_domain(uma_zone_t zone, void *item, void *udata) | uma_zfree_domain(uma_zone_t zone, void *item, void *udata) | ||||
{ | { | ||||
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ | ||||
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); | ||||
CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, | CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, | ||||
zone->uz_name); | zone->uz_name); | ||||
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | ||||
("uma_zfree_domain: called with spinlock or critical section held")); | ("uma_zfree_domain: called with spinlock or critical section held")); | ||||
/* uma_zfree(..., NULL) does nothing, to match free(9). */ | /* uma_zfree(..., NULL) does nothing, to match free(9). */ | ||||
if (item == NULL) | if (item == NULL) | ||||
return; | return; | ||||
zone_free_item(zone, item, udata, SKIP_NONE); | zone_free_item(zone, item, udata, SKIP_NONE); | ||||
} | } | ||||
static void | static void | ||||
slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) | slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item) | ||||
{ | { | ||||
uma_keg_t keg; | |||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uint8_t freei; | uint8_t freei; | ||||
mtx_assert(&keg->uk_lock, MA_OWNED); | keg = zone->uz_keg; | ||||
MPASS(zone->uz_lockptr == &keg->uk_lock); | |||||
KEG_LOCK_ASSERT(keg); | |||||
MPASS(keg == slab->us_keg); | MPASS(keg == slab->us_keg); | ||||
dom = &keg->uk_domain[slab->us_domain]; | dom = &keg->uk_domain[slab->us_domain]; | ||||
/* Do we need to remove from any lists? */ | /* Do we need to remove from any lists? */ | ||||
if (slab->us_freecount+1 == keg->uk_ipers) { | if (slab->us_freecount+1 == keg->uk_ipers) { | ||||
LIST_REMOVE(slab, us_link); | LIST_REMOVE(slab, us_link); | ||||
LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); | LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); | ||||
Show All 13 Lines | |||||
static void | static void | ||||
zone_release(uma_zone_t zone, void **bucket, int cnt) | zone_release(uma_zone_t zone, void **bucket, int cnt) | ||||
{ | { | ||||
void *item; | void *item; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
uint8_t *mem; | uint8_t *mem; | ||||
int clearfull; | |||||
int i; | int i; | ||||
clearfull = 0; | keg = zone->uz_keg; | ||||
keg = zone_first_keg(zone); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
for (i = 0; i < cnt; i++) { | for (i = 0; i < cnt; i++) { | ||||
item = bucket[i]; | item = bucket[i]; | ||||
if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { | if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { | ||||
mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); | mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); | ||||
if (zone->uz_flags & UMA_ZONE_HASH) { | if (zone->uz_flags & UMA_ZONE_HASH) { | ||||
slab = hash_sfind(&keg->uk_hash, mem); | slab = hash_sfind(&keg->uk_hash, mem); | ||||
} else { | } else { | ||||
mem += keg->uk_pgoff; | mem += keg->uk_pgoff; | ||||
slab = (uma_slab_t)mem; | slab = (uma_slab_t)mem; | ||||
} | } | ||||
} else { | } else { | ||||
slab = vtoslab((vm_offset_t)item); | slab = vtoslab((vm_offset_t)item); | ||||
if (slab->us_keg != keg) { | MPASS(slab->us_keg == keg); | ||||
KEG_UNLOCK(keg); | |||||
keg = slab->us_keg; | |||||
KEG_LOCK(keg); | |||||
} | } | ||||
slab_free_item(zone, slab, item); | |||||
} | } | ||||
slab_free_item(keg, slab, item); | |||||
if (keg->uk_flags & UMA_ZFLAG_FULL) { | |||||
if (keg->uk_pages < keg->uk_maxpages) { | |||||
keg->uk_flags &= ~UMA_ZFLAG_FULL; | |||||
clearfull = 1; | |||||
} | |||||
/* | |||||
* We can handle one more allocation. Since we're | |||||
* clearing ZFLAG_FULL, wake up all procs blocked | |||||
* on pages. This should be uncommon, so keeping this | |||||
* simple for now (rather than adding count of blocked | |||||
* threads etc). | |||||
*/ | |||||
wakeup(keg); | |||||
} | |||||
} | |||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
if (clearfull) { | |||||
ZONE_LOCK(zone); | |||||
zone->uz_flags &= ~UMA_ZFLAG_FULL; | |||||
wakeup(zone); | |||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
} | |||||
/* | /* | ||||
* Frees a single item to any zone. | * Frees a single item to any zone. | ||||
* | * | ||||
* Arguments: | * Arguments: | ||||
* zone The zone to free to | * zone The zone to free to | ||||
* item The item we're freeing | * item The item we're freeing | ||||
* udata User supplied data for the dtor | * udata User supplied data for the dtor | ||||
* skip Skip dtors and finis | * skip Skip dtors and finis | ||||
Show All 18 Lines | |||||
#else | #else | ||||
if (skip < SKIP_DTOR && zone->uz_dtor != NULL) | if (skip < SKIP_DTOR && zone->uz_dtor != NULL) | ||||
#endif | #endif | ||||
zone->uz_dtor(item, zone->uz_size, udata); | zone->uz_dtor(item, zone->uz_size, udata); | ||||
if (skip < SKIP_FINI && zone->uz_fini) | if (skip < SKIP_FINI && zone->uz_fini) | ||||
zone->uz_fini(item, zone->uz_size); | zone->uz_fini(item, zone->uz_size); | ||||
atomic_add_long(&zone->uz_frees, 1); | |||||
zone->uz_release(zone->uz_arg, &item, 1); | zone->uz_release(zone->uz_arg, &item, 1); | ||||
if (skip & SKIP_CNT) | |||||
return; | |||||
ZONE_LOCK(zone); | |||||
zone->uz_frees++; | |||||
zone->uz_items--; | |||||
if (zone->uz_sleepers > 0 && zone->uz_items < zone->uz_max_items) | |||||
wakeup_one(zone); | |||||
ZONE_UNLOCK(zone); | |||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_set_max(uma_zone_t zone, int nitems) | uma_zone_set_max(uma_zone_t zone, int nitems) | ||||
{ | { | ||||
uma_keg_t keg; | struct uma_bucket_zone *ubz; | ||||
keg = zone_first_keg(zone); | /* | ||||
if (keg == NULL) | * If limit is very low we may need to limit how | ||||
return (0); | * much items are allowed in CPU caches. | ||||
KEG_LOCK(keg); | */ | ||||
keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; | ubz = &bucket_zones[0]; | ||||
if (keg->uk_maxpages * keg->uk_ipers < nitems) | for (; ubz->ubz_entries != 0; ubz++) | ||||
keg->uk_maxpages += keg->uk_ppera; | if (ubz->ubz_entries * 2 * mp_ncpus > nitems) | ||||
nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; | break; | ||||
KEG_UNLOCK(keg); | if (ubz == &bucket_zones[0]) | ||||
nitems = ubz->ubz_entries * 2 * mp_ncpus; | |||||
else | |||||
ubz--; | |||||
ZONE_LOCK(zone); | |||||
zone->uz_count_max = zone->uz_count = ubz->ubz_entries; | |||||
if (zone->uz_count_min > zone->uz_count_max) | |||||
zone->uz_count_min = zone->uz_count_max; | |||||
zone->uz_max_items = nitems; | |||||
ZONE_UNLOCK(zone); | |||||
return (nitems); | return (nitems); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_set_maxcache(uma_zone_t zone, int nitems) | |||||
{ | |||||
ZONE_LOCK(zone); | |||||
zone->uz_bkt_max = nitems; | |||||
ZONE_UNLOCK(zone); | |||||
return (nitems); | |||||
} | |||||
/* See uma.h */ | |||||
int | |||||
uma_zone_get_max(uma_zone_t zone) | uma_zone_get_max(uma_zone_t zone) | ||||
{ | { | ||||
int nitems; | int nitems; | ||||
uma_keg_t keg; | |||||
keg = zone_first_keg(zone); | ZONE_LOCK(zone); | ||||
if (keg == NULL) | nitems = zone->uz_max_items; | ||||
return (0); | ZONE_UNLOCK(zone); | ||||
KEG_LOCK(keg); | |||||
nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; | |||||
KEG_UNLOCK(keg); | |||||
return (nitems); | return (nitems); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_warning(uma_zone_t zone, const char *warning) | uma_zone_set_warning(uma_zone_t zone, const char *warning) | ||||
{ | { | ||||
Show All 37 Lines | |||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_init(uma_zone_t zone, uma_init uminit) | uma_zone_set_init(uma_zone_t zone, uma_init uminit) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
KASSERT(keg->uk_pages == 0, | KASSERT(keg->uk_pages == 0, | ||||
("uma_zone_set_init on non-empty keg")); | ("uma_zone_set_init on non-empty keg")); | ||||
keg->uk_init = uminit; | keg->uk_init = uminit; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_fini(uma_zone_t zone, uma_fini fini) | uma_zone_set_fini(uma_zone_t zone, uma_fini fini) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
KASSERT(keg->uk_pages == 0, | KASSERT(keg->uk_pages == 0, | ||||
("uma_zone_set_fini on non-empty keg")); | ("uma_zone_set_fini on non-empty keg")); | ||||
keg->uk_fini = fini; | keg->uk_fini = fini; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) | uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) | ||||
{ | { | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
KASSERT(zone_first_keg(zone)->uk_pages == 0, | KASSERT(zone->uz_keg->uk_pages == 0, | ||||
("uma_zone_set_zinit on non-empty keg")); | ("uma_zone_set_zinit on non-empty keg")); | ||||
zone->uz_init = zinit; | zone->uz_init = zinit; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) | uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) | ||||
{ | { | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
KASSERT(zone_first_keg(zone)->uk_pages == 0, | KASSERT(zone->uz_keg->uk_pages == 0, | ||||
("uma_zone_set_zfini on non-empty keg")); | ("uma_zone_set_zfini on non-empty keg")); | ||||
zone->uz_fini = zfini; | zone->uz_fini = zfini; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
/* XXX uk_freef is not actually used with the zone locked */ | /* XXX uk_freef is not actually used with the zone locked */ | ||||
void | void | ||||
uma_zone_set_freef(uma_zone_t zone, uma_free freef) | uma_zone_set_freef(uma_zone_t zone, uma_free freef) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); | KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_freef = freef; | keg->uk_freef = freef; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
/* XXX uk_allocf is not actually used with the zone locked */ | /* XXX uk_allocf is not actually used with the zone locked */ | ||||
void | void | ||||
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) | uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_allocf = allocf; | keg->uk_allocf = allocf; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_zone_reserve(uma_zone_t zone, int items) | uma_zone_reserve(uma_zone_t zone, int items) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return; | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
keg->uk_reserve = items; | keg->uk_reserve = items; | ||||
KEG_UNLOCK(keg); | KEG_UNLOCK(keg); | ||||
return; | |||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_reserve_kva(uma_zone_t zone, int count) | uma_zone_reserve_kva(uma_zone_t zone, int count) | ||||
{ | { | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
vm_offset_t kva; | vm_offset_t kva; | ||||
u_int pages; | u_int pages; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return (0); | |||||
pages = count / keg->uk_ipers; | |||||
pages = count / keg->uk_ipers; | |||||
if (pages * keg->uk_ipers < count) | if (pages * keg->uk_ipers < count) | ||||
pages++; | pages++; | ||||
pages *= keg->uk_ppera; | pages *= keg->uk_ppera; | ||||
#ifdef UMA_MD_SMALL_ALLOC | #ifdef UMA_MD_SMALL_ALLOC | ||||
if (keg->uk_ppera > 1) { | if (keg->uk_ppera > 1) { | ||||
#else | #else | ||||
if (1) { | if (1) { | ||||
#endif | #endif | ||||
kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); | ||||
if (kva == 0) | if (kva == 0) | ||||
return (0); | return (0); | ||||
} else | } else | ||||
kva = 0; | kva = 0; | ||||
KEG_LOCK(keg); | |||||
ZONE_LOCK(zone); | |||||
MPASS(keg->uk_kva == 0); | |||||
keg->uk_kva = kva; | keg->uk_kva = kva; | ||||
keg->uk_offset = 0; | keg->uk_offset = 0; | ||||
keg->uk_maxpages = pages; | zone->uz_max_items = pages * keg->uk_ipers; | ||||
#ifdef UMA_MD_SMALL_ALLOC | #ifdef UMA_MD_SMALL_ALLOC | ||||
keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; | ||||
#else | #else | ||||
keg->uk_allocf = noobj_alloc; | keg->uk_allocf = noobj_alloc; | ||||
#endif | #endif | ||||
keg->uk_flags |= UMA_ZONE_NOFREE; | keg->uk_flags |= UMA_ZONE_NOFREE; | ||||
KEG_UNLOCK(keg); | ZONE_UNLOCK(zone); | ||||
return (1); | return (1); | ||||
} | } | ||||
/* See uma.h */ | /* See uma.h */ | ||||
void | void | ||||
uma_prealloc(uma_zone_t zone, int items) | uma_prealloc(uma_zone_t zone, int items) | ||||
{ | { | ||||
struct vm_domainset_iter di; | struct vm_domainset_iter di; | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
uma_keg_t keg; | uma_keg_t keg; | ||||
int domain, flags, slabs; | int domain, flags, slabs; | ||||
keg = zone_first_keg(zone); | KEG_GET(zone, keg); | ||||
if (keg == NULL) | |||||
return; | |||||
KEG_LOCK(keg); | KEG_LOCK(keg); | ||||
slabs = items / keg->uk_ipers; | slabs = items / keg->uk_ipers; | ||||
if (slabs * keg->uk_ipers < items) | if (slabs * keg->uk_ipers < items) | ||||
slabs++; | slabs++; | ||||
flags = M_WAITOK; | flags = M_WAITOK; | ||||
vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); | vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); | ||||
while (slabs-- > 0) { | while (slabs-- > 0) { | ||||
slab = keg_alloc_slab(keg, zone, domain, flags); | slab = keg_alloc_slab(keg, zone, domain, flags); | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | |||||
/* See uma.h */ | /* See uma.h */ | ||||
int | int | ||||
uma_zone_exhausted(uma_zone_t zone) | uma_zone_exhausted(uma_zone_t zone) | ||||
{ | { | ||||
int full; | int full; | ||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
full = (zone->uz_flags & UMA_ZFLAG_FULL); | full = zone->uz_sleepers > 0; | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
return (full); | return (full); | ||||
} | } | ||||
int | int | ||||
uma_zone_exhausted_nolock(uma_zone_t zone) | uma_zone_exhausted_nolock(uma_zone_t zone) | ||||
{ | { | ||||
return (zone->uz_flags & UMA_ZFLAG_FULL); | return (zone->uz_sleepers > 0); | ||||
} | } | ||||
void * | void * | ||||
uma_large_malloc_domain(vm_size_t size, int domain, int wait) | uma_large_malloc_domain(vm_size_t size, int domain, int wait) | ||||
{ | { | ||||
struct domainset *policy; | struct domainset *policy; | ||||
vm_offset_t addr; | vm_offset_t addr; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
uma_print_keg(uma_keg_t keg) | uma_print_keg(uma_keg_t keg) | ||||
{ | { | ||||
uma_domain_t dom; | uma_domain_t dom; | ||||
uma_slab_t slab; | uma_slab_t slab; | ||||
int i; | int i; | ||||
printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " | printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " | ||||
"out %d free %d limit %d\n", | "out %d free %d\n", | ||||
keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, | keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, | ||||
keg->uk_ipers, keg->uk_ppera, | keg->uk_ipers, keg->uk_ppera, | ||||
(keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, | (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, | ||||
keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); | keg->uk_free); | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
dom = &keg->uk_domain[i]; | dom = &keg->uk_domain[i]; | ||||
printf("Part slabs:\n"); | printf("Part slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_part_slab, us_link) | LIST_FOREACH(slab, &dom->ud_part_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
printf("Free slabs:\n"); | printf("Free slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_free_slab, us_link) | LIST_FOREACH(slab, &dom->ud_free_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
printf("Full slabs:\n"); | printf("Full slabs:\n"); | ||||
LIST_FOREACH(slab, &dom->ud_full_slab, us_link) | LIST_FOREACH(slab, &dom->ud_full_slab, us_link) | ||||
slab_print(slab); | slab_print(slab); | ||||
} | } | ||||
} | } | ||||
void | void | ||||
uma_print_zone(uma_zone_t zone) | uma_print_zone(uma_zone_t zone) | ||||
{ | { | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_klink_t kl; | |||||
int i; | int i; | ||||
printf("zone: %s(%p) size %d flags %#x\n", | printf("zone: %s(%p) size %d maxitems %lu flags %#x\n", | ||||
zone->uz_name, zone, zone->uz_size, zone->uz_flags); | zone->uz_name, zone, zone->uz_size, zone->uz_max_items, | ||||
LIST_FOREACH(kl, &zone->uz_kegs, kl_link) | zone->uz_flags); | ||||
uma_print_keg(kl->kl_keg); | if (zone->uz_lockptr != &zone->uz_lock) | ||||
uma_print_keg(zone->uz_keg); | |||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
cache = &zone->uz_cpu[i]; | cache = &zone->uz_cpu[i]; | ||||
printf("CPU %d Cache:\n", i); | printf("CPU %d Cache:\n", i); | ||||
cache_print(cache); | cache_print(cache); | ||||
} | } | ||||
} | } | ||||
#ifdef DDB | #ifdef DDB | ||||
▲ Show 20 Lines • Show All 62 Lines • ▼ Show 20 Lines | |||||
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct uma_stream_header ush; | struct uma_stream_header ush; | ||||
struct uma_type_header uth; | struct uma_type_header uth; | ||||
struct uma_percpu_stat *ups; | struct uma_percpu_stat *ups; | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
struct sbuf sbuf; | struct sbuf sbuf; | ||||
uma_cache_t cache; | uma_cache_t cache; | ||||
uma_klink_t kl; | |||||
uma_keg_t kz; | uma_keg_t kz; | ||||
uma_zone_t z; | uma_zone_t z; | ||||
uma_keg_t k; | |||||
int count, error, i; | int count, error, i; | ||||
error = sysctl_wire_old_buffer(req, 0); | error = sysctl_wire_old_buffer(req, 0); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | ||||
sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); | sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); | ||||
ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); | ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); | ||||
Show All 17 Lines | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
LIST_FOREACH(kz, &uma_kegs, uk_link) { | LIST_FOREACH(kz, &uma_kegs, uk_link) { | ||||
LIST_FOREACH(z, &kz->uk_zones, uz_link) { | LIST_FOREACH(z, &kz->uk_zones, uz_link) { | ||||
bzero(&uth, sizeof(uth)); | bzero(&uth, sizeof(uth)); | ||||
ZONE_LOCK(z); | ZONE_LOCK(z); | ||||
strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); | ||||
uth.uth_align = kz->uk_align; | uth.uth_align = kz->uk_align; | ||||
uth.uth_size = kz->uk_size; | uth.uth_size = kz->uk_size; | ||||
uth.uth_rsize = kz->uk_rsize; | uth.uth_rsize = kz->uk_rsize; | ||||
LIST_FOREACH(kl, &z->uz_kegs, kl_link) { | uth.uth_pages += (z->uz_items / kz->uk_ipers) * | ||||
k = kl->kl_keg; | kz->uk_ppera; | ||||
uth.uth_maxpages += k->uk_maxpages; | uth.uth_maxpages += (z->uz_max_items / kz->uk_ipers) * | ||||
uth.uth_pages += k->uk_pages; | kz->uk_ppera; | ||||
uth.uth_keg_free += k->uk_free; | uth.uth_limit = z->uz_max_items; | ||||
uth.uth_limit = (k->uk_maxpages / k->uk_ppera) | uth.uth_keg_free += z->uz_keg->uk_free; | ||||
* k->uk_ipers; | |||||
} | |||||
/* | /* | ||||
* A zone is secondary is it is not the first entry | * A zone is secondary is it is not the first entry | ||||
* on the keg's zone list. | * on the keg's zone list. | ||||
*/ | */ | ||||
if ((z->uz_flags & UMA_ZONE_SECONDARY) && | if ((z->uz_flags & UMA_ZONE_SECONDARY) && | ||||
(LIST_FIRST(&kz->uk_zones) != z)) | (LIST_FIRST(&kz->uk_zones) != z)) | ||||
uth.uth_zone_flags = UTH_ZONE_SECONDARY; | uth.uth_zone_flags = UTH_ZONE_SECONDARY; | ||||
▲ Show 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | uma_dbg_getslab(uma_zone_t zone, void *item) | ||||
if (zone->uz_flags & UMA_ZONE_VTOSLAB) { | if (zone->uz_flags & UMA_ZONE_VTOSLAB) { | ||||
slab = vtoslab((vm_offset_t)mem); | slab = vtoslab((vm_offset_t)mem); | ||||
} else { | } else { | ||||
/* | /* | ||||
* It is safe to return the slab here even though the | * It is safe to return the slab here even though the | ||||
* zone is unlocked because the item's allocation state | * zone is unlocked because the item's allocation state | ||||
* essentially holds a reference. | * essentially holds a reference. | ||||
*/ | */ | ||||
if (zone->uz_lockptr == &zone->uz_lock) | |||||
return (NULL); | |||||
ZONE_LOCK(zone); | ZONE_LOCK(zone); | ||||
keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; | keg = zone->uz_keg; | ||||
if (keg->uk_flags & UMA_ZONE_HASH) | if (keg->uk_flags & UMA_ZONE_HASH) | ||||
slab = hash_sfind(&keg->uk_hash, mem); | slab = hash_sfind(&keg->uk_hash, mem); | ||||
else | else | ||||
slab = (uma_slab_t)(mem + keg->uk_pgoff); | slab = (uma_slab_t)(mem + keg->uk_pgoff); | ||||
ZONE_UNLOCK(zone); | ZONE_UNLOCK(zone); | ||||
} | } | ||||
return (slab); | return (slab); | ||||
} | } | ||||
static bool | static bool | ||||
uma_dbg_zskip(uma_zone_t zone, void *mem) | uma_dbg_zskip(uma_zone_t zone, void *mem) | ||||
{ | { | ||||
uma_keg_t keg; | |||||
if ((keg = zone_first_keg(zone)) == NULL) | if (zone->uz_lockptr == &zone->uz_lock) | ||||
return (true); | return (true); | ||||
return (uma_dbg_kskip(keg, mem)); | return (uma_dbg_kskip(zone->uz_keg, mem)); | ||||
} | } | ||||
static bool | static bool | ||||
uma_dbg_kskip(uma_keg_t keg, void *mem) | uma_dbg_kskip(uma_keg_t keg, void *mem) | ||||
{ | { | ||||
uintptr_t idx; | uintptr_t idx; | ||||
if (dbg_divisor == 0) | if (dbg_divisor == 0) | ||||
▲ Show 20 Lines • Show All 142 Lines • Show Last 20 Lines |