Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/uma_core.c
Show First 20 Lines • Show All 788 Lines • ▼ Show 20 Lines | cache_drain(uma_zone_t zone) | ||||
* | * | ||||
	 * XXX: It would be good to be able to assert that the zone is being
* torn down to prevent improper use of cache_drain(). | * torn down to prevent improper use of cache_drain(). | ||||
* | * | ||||
* XXX: We lock the zone before passing into bucket_cache_reclaim() as | * XXX: We lock the zone before passing into bucket_cache_reclaim() as | ||||
* it is used elsewhere. Should the tear-down path be made special | * it is used elsewhere. Should the tear-down path be made special | ||||
* there in some form? | * there in some form? | ||||
*/ | */ | ||||
CPU_FOREACH(cpu) { | for (i = 0; i < mp_maxid + 1; i++) { | ||||
cache = &zone->uz_cpu[cpu]; | cache = &zone->uz_cpu[cpu]; | ||||
bucket_drain(zone, cache->uc_allocbucket); | bucket_drain(zone, cache->uc_allocbucket); | ||||
if (cache->uc_allocbucket != NULL) | if (cache->uc_allocbucket != NULL) | ||||
bucket_free(zone, cache->uc_allocbucket, NULL); | bucket_free(zone, cache->uc_allocbucket, NULL); | ||||
cache->uc_allocbucket = NULL; | cache->uc_allocbucket = NULL; | ||||
bucket_drain(zone, cache->uc_freebucket); | bucket_drain(zone, cache->uc_freebucket); | ||||
if (cache->uc_freebucket != NULL) | if (cache->uc_freebucket != NULL) | ||||
bucket_free(zone, cache->uc_freebucket, NULL); | bucket_free(zone, cache->uc_freebucket, NULL); | ||||
▲ Show 20 Lines • Show All 3,244 Lines • ▼ Show 20 Lines | sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) | ||||
return (sysctl_handle_int(oidp, &count, 0, req)); | return (sysctl_handle_int(oidp, &count, 0, req)); | ||||
} | } | ||||
static void | static void | ||||
uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf, | uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf, | ||||
struct uma_percpu_stat *ups, bool internal) | struct uma_percpu_stat *ups, bool internal) | ||||
{ | { | ||||
uma_zone_domain_t zdom; | uma_zone_domain_t zdom; | ||||
uma_bucket_t bucket; | |||||
uma_cache_t cache; | uma_cache_t cache; | ||||
int i; | int i; | ||||
for (i = 0; i < vm_ndomains; i++) { | for (i = 0; i < vm_ndomains; i++) { | ||||
zdom = &z->uz_domain[i]; | zdom = &z->uz_domain[i]; | ||||
uth->uth_zone_free += zdom->uzd_nitems; | uth->uth_zone_free += zdom->uzd_nitems; | ||||
} | } | ||||
uth->uth_allocs = counter_u64_fetch(z->uz_allocs); | uth->uth_allocs = counter_u64_fetch(z->uz_allocs); | ||||
uth->uth_frees = counter_u64_fetch(z->uz_frees); | uth->uth_frees = counter_u64_fetch(z->uz_frees); | ||||
uth->uth_fails = counter_u64_fetch(z->uz_fails); | uth->uth_fails = counter_u64_fetch(z->uz_fails); | ||||
uth->uth_sleeps = z->uz_sleeps; | uth->uth_sleeps = z->uz_sleeps; | ||||
uth->uth_xdomain = z->uz_xdomain; | uth->uth_xdomain = z->uz_xdomain; | ||||
/* | /* | ||||
* While it is not normally safe to access the cache | * While it is not normally safe to access the cache | ||||
* bucket pointers while not on the CPU that owns the | * bucket pointers while not on the CPU that owns the | ||||
* cache, we only allow the pointers to be exchanged | * cache, we only allow the pointers to be exchanged | ||||
* without the zone lock held, not invalidated, so | * without the zone lock held, not invalidated, so | ||||
* accept the possible race associated with bucket | * accept the possible race associated with bucket | ||||
* exchange during monitoring. | * exchange during monitoring. | ||||
*/ | */ | ||||
for (i = 0; i < mp_maxid + 1; i++) { | CPU_FOREACH(i) { | ||||
bzero(&ups[i], sizeof(*ups)); | bzero(&ups[i], sizeof(*ups)); | ||||
if (internal || CPU_ABSENT(i)) | if (internal || CPU_ABSENT(i)) | ||||
continue; | continue; | ||||
cache = &z->uz_cpu[i]; | cache = &z->uz_cpu[i]; | ||||
if (cache->uc_allocbucket != NULL) | bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket); | ||||
ups[i].ups_cache_free += | if (bucket != NULL) | ||||
cache->uc_allocbucket->ub_cnt; | ups[i].ups_cache_free += bucket->ub_cnt; | ||||
if (cache->uc_freebucket != NULL) | bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket); | ||||
ups[i].ups_cache_free += | if (bucket != NULL) | ||||
cache->uc_freebucket->ub_cnt; | ups[i].ups_cache_free += bucket->ub_cnt; | ||||
if (cache->uc_crossbucket != NULL) | bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket); | ||||
ups[i].ups_cache_free += | if (bucket != NULL) | ||||
cache->uc_crossbucket->ub_cnt; | ups[i].ups_cache_free += bucket->ub_cnt; | ||||
ups[i].ups_allocs = cache->uc_allocs; | ups[i].ups_allocs = cache->uc_allocs; | ||||
ups[i].ups_frees = cache->uc_frees; | ups[i].ups_frees = cache->uc_frees; | ||||
} | } | ||||
} | } | ||||
static int | static int | ||||
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 362 Lines • Show Last 20 Lines |