Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -3697,12 +3697,14 @@
 	 * SMR Zones can't re-use the free bucket until the sequence has
 	 * expired.
 	 */
+#ifndef KASAN
 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 &&
 	    cache->uc_freebucket.ucb_cnt != 0) {
 		cache_bucket_swap(&cache->uc_freebucket,
 		    &cache->uc_allocbucket);
 		return (true);
 	}
+#endif
 
 	/*
 	 * Discard any empty allocation bucket while we hold no locks.
@@ -4361,27 +4363,13 @@
 	return (NULL);
 }
 
-/* See uma.h */
-void
-uma_zfree_smr(uma_zone_t zone, void *item)
+static __always_inline bool
+cache_free_fifo(uma_zone_t zone, int uz_flags, void *item, void *udata)
 {
 	uma_cache_t cache;
 	uma_cache_bucket_t bucket;
-	int itemdomain, uz_flags;
+	int itemdomain;
 
-	CTR3(KTR_UMA, "uma_zfree_smr zone %s(%p) item %p",
-	    zone->uz_name, zone, item);
-
-#ifdef UMA_ZALLOC_DEBUG
-	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
-	    ("uma_zfree_smr: called with non-SMR zone."));
-	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
-	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
-	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
-		return;
-#endif
-	cache = &zone->uz_cpu[curcpu];
-	uz_flags = cache_uz_flags(cache);
 	itemdomain = 0;
 #ifdef NUMA
 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
@@ -4401,11 +4389,80 @@
 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
 			cache_bucket_push(cache, bucket, item);
 			critical_exit();
-			return;
+			return (true);
+		}
+	} while (cache_free(zone, cache, udata, itemdomain));
+	critical_exit();
+
+	return (false);
+}
+
+static __always_inline bool
+cache_free_lifo(uma_zone_t zone, int uz_flags, void *item, void *udata)
+{
+	uma_cache_t cache;
+	uma_cache_bucket_t bucket;
+	int itemdomain;
+
+	itemdomain = 0;
+#ifdef NUMA
+	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
+		itemdomain = item_domain(item);
+#endif
+	critical_enter();
+	do {
+		cache = &zone->uz_cpu[curcpu];
+		/*
+		 * Try to free into the allocbucket first to give LIFO
+		 * ordering for cache-hot datastructures.  Spill over
+		 * into the freebucket if necessary.  Alloc will swap
+		 * them if one runs dry.
+		 */
+		bucket = &cache->uc_allocbucket;
+#ifdef NUMA
+		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
+		    PCPU_GET(domain) != itemdomain) {
+			bucket = &cache->uc_crossbucket;
+		} else
+#endif
+		if (bucket->ucb_cnt == bucket->ucb_entries &&
+		   cache->uc_freebucket.ucb_cnt <
+		   cache->uc_freebucket.ucb_entries)
+			cache_bucket_swap(&cache->uc_freebucket,
+			    &cache->uc_allocbucket);
+		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
+			cache_bucket_push(cache, bucket, item);
+			critical_exit();
+			return (true);
 		}
-	} while (cache_free(zone, cache, NULL, itemdomain));
+	} while (cache_free(zone, cache, udata, itemdomain));
 	critical_exit();
 
+	return (false);
+}
+
+/* See uma.h */
+void
+uma_zfree_smr(uma_zone_t zone, void *item)
+{
+	int uz_flags;
+
+	CTR3(KTR_UMA, "uma_zfree_smr zone %s(%p) item %p",
+	    zone->uz_name, zone, item);
+
+#ifdef UMA_ZALLOC_DEBUG
+	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
+	    ("uma_zfree_smr: called with non-SMR zone."));
+	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
+	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
+	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
+		return;
+#endif
+
+	uz_flags = cache_uz_flags(&zone->uz_cpu[curcpu]);
+	if (cache_free_fifo(zone, uz_flags, item, NULL))
+		return;
+
 	/*
 	 * If nothing else caught this, we'll just do an internal free.
 	 */
@@ -4417,8 +4474,7 @@
 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 {
 	uma_cache_t cache;
-	uma_cache_bucket_t bucket;
-	int itemdomain, uz_flags;
+	int uz_flags;
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
@@ -4467,39 +4523,13 @@
 	 * current cache; when we re-acquire the critical section, we must
 	 * detect and handle migration if it has occurred.
 	 */
-	itemdomain = 0;
-#ifdef NUMA
-	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
-		itemdomain = item_domain(item);
-#endif
-	critical_enter();
-	do {
-		cache = &zone->uz_cpu[curcpu];
-		/*
-		 * Try to free into the allocbucket first to give LIFO
-		 * ordering for cache-hot datastructures.  Spill over
-		 * into the freebucket if necessary.  Alloc will swap
-		 * them if one runs dry.
-		 */
-		bucket = &cache->uc_allocbucket;
-#ifdef NUMA
-		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
-		    PCPU_GET(domain) != itemdomain) {
-			bucket = &cache->uc_crossbucket;
-		} else
+#ifdef KASAN
+	if (cache_free_fifo(zone, uz_flags, item, udata))
+		return;
+#else
+	if (cache_free_lifo(zone, uz_flags, item, udata))
+		return;
 #endif
-		if (bucket->ucb_cnt == bucket->ucb_entries &&
-		   cache->uc_freebucket.ucb_cnt <
-		   cache->uc_freebucket.ucb_entries)
-			cache_bucket_swap(&cache->uc_freebucket,
-			    &cache->uc_allocbucket);
-		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
-			cache_bucket_push(cache, bucket, item);
-			critical_exit();
-			return;
-		}
-	} while (cache_free(zone, cache, udata, itemdomain));
-	critical_exit();
 
 	/*
 	 * If nothing else caught this, we'll just do an internal free.