Index: sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
===================================================================
--- sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
+++ sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
@@ -165,7 +165,8 @@
 	ptls->zone = uma_zcache_create(ptls->zname,
 	    sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
-	    mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev, 0);
+	    mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev,
+	    UMA_ZONE_UNMANAGED);
 
 	/* shared between RX and TX TLS */
 	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);
 
Index: sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
===================================================================
--- sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
+++ sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
@@ -459,7 +459,8 @@
 	ptls->zone = uma_zcache_create(ptls->zname,
 	    sizeof(struct mlx5e_tls_rx_tag), NULL, NULL, NULL, NULL,
-	    mlx5e_tls_rx_tag_import, mlx5e_tls_rx_tag_release, priv->mdev, 0);
+	    mlx5e_tls_rx_tag_import, mlx5e_tls_rx_tag_release, priv->mdev,
+	    UMA_ZONE_UNMANAGED);
 
 	/* shared between RX and TX TLS */
 	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);
 
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -233,6 +233,10 @@
  * These flags share space with UMA_ZFLAGs in uma_int.h.  Be careful not to
  * overlap when adding new features.
  */
+#define	UMA_ZONE_UNMANAGED	0x0001	/*
+					 * Don't regulate the cache size, even
+					 * under memory pressure.
+					 */
 #define	UMA_ZONE_ZINIT		0x0002	/* Initialize with zeros */
 #define	UMA_ZONE_CONTIG		0x0004	/*
 					 * Physical memory underlying an object
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -1223,10 +1223,12 @@
 	}
 
 trim:
 	/* Trim caches not used for a long time. */
-	for (int i = 0; i < vm_ndomains; i++) {
-		if (bucket_cache_reclaim_domain(zone, false, false, i) &&
-		    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
-			keg_drain(zone->uz_keg, i);
+	if ((zone->uz_flags & UMA_ZONE_UNMANAGED) == 0) {
+		for (int i = 0; i < vm_ndomains; i++) {
+			if (bucket_cache_reclaim_domain(zone, false, false, i) &&
+			    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
+				keg_drain(zone->uz_keg, i);
+		}
 	}
 }
@@ -1735,24 +1737,6 @@
 	ZONE_UNLOCK(zone);
 }
 
-static void
-zone_drain(uma_zone_t zone, void *arg)
-{
-	int domain;
-
-	domain = (int)(uintptr_t)arg;
-	zone_reclaim(zone, domain, M_NOWAIT, true);
-}
-
-static void
-zone_trim(uma_zone_t zone, void *arg)
-{
-	int domain;
-
-	domain = (int)(uintptr_t)arg;
-	zone_reclaim(zone, domain, M_NOWAIT, false);
-}
-
 /*
  * Allocate a new slab for a keg and inserts it into the partial slab list.
  * The keg should be unlocked on entry.  If the allocation succeeds it will
@@ -2870,6 +2854,10 @@
 		    ("zone_ctor: Import specified for non-cache zone."));
 		zone->uz_flags = arg->flags;
 		zone->uz_size = arg->size;
+#if 0 /* XXXMJ */
+		zone->uz_init = arg->uminit;
+		zone->uz_fini = arg->fini;
+#endif
 		zone->uz_import = arg->import;
 		zone->uz_release = arg->release;
 		zone->uz_arg = arg->arg;
@@ -3548,7 +3536,8 @@
 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 		counter_u64_add(zone->uz_fails, 1);
-		zone->uz_fini(item, zone->uz_size);
+		if (zone->uz_fini != NULL)
+			zone->uz_fini(item, zone->uz_size);
 		*itemp = NULL;
 		return (error);
 	}
 
@@ -5202,6 +5191,21 @@
 	return (sz * PAGE_SIZE);
 }
 
+struct uma_reclaim_args {
+	int	domain;
+	int	req;
+};
+
+static void
+uma_reclaim_domain_cb(uma_zone_t zone, void *arg)
+{
+	struct uma_reclaim_args *args;
+
+	args = arg;
+	if ((zone->uz_flags & UMA_ZONE_UNMANAGED) == 0)
+		uma_zone_reclaim_domain(zone, args->req, args->domain);
+}
+
 /* See uma.h */
 void
 uma_reclaim(int req)
@@ -5212,23 +5216,23 @@
 void
 uma_reclaim_domain(int req, int domain)
 {
-	void *arg;
+	struct uma_reclaim_args args;
 
 	bucket_enable();
-	arg = (void *)(uintptr_t)domain;
+	args.domain = domain;
+	args.req = req;
+
 	sx_slock(&uma_reclaim_lock);
 	switch (req) {
 	case UMA_RECLAIM_TRIM:
-		zone_foreach(zone_trim, arg);
-		break;
 	case UMA_RECLAIM_DRAIN:
-		zone_foreach(zone_drain, arg);
+		zone_foreach(uma_reclaim_domain_cb, &args);
 		break;
 	case UMA_RECLAIM_DRAIN_CPU:
-		zone_foreach(zone_drain, arg);
+		zone_foreach(uma_reclaim_domain_cb, &args);
 		pcpu_cache_drain_safe(NULL);
-		zone_foreach(zone_drain, arg);
+		zone_foreach(uma_reclaim_domain_cb, &args);
 		break;
 	default:
 		panic("unhandled reclamation request %d", req);
 	}
@@ -5239,8 +5243,8 @@
 	 * we visit again so that we can free pages that are empty once other
 	 * zones are drained.  We have to do the same for buckets.
 	 */
-	zone_drain(slabzones[0], arg);
-	zone_drain(slabzones[1], arg);
+	uma_zone_reclaim_domain(slabzones[0], UMA_RECLAIM_DRAIN, domain);
+	uma_zone_reclaim_domain(slabzones[1], UMA_RECLAIM_DRAIN, domain);
 	bucket_zone_drain(domain);
 	sx_sunlock(&uma_reclaim_lock);
 }
@@ -5288,14 +5292,14 @@
 	arg = (void *)(uintptr_t)domain;
 	switch (req) {
 	case UMA_RECLAIM_TRIM:
-		zone_trim(zone, arg);
+		zone_reclaim(zone, domain, M_NOWAIT, false);
 		break;
 	case UMA_RECLAIM_DRAIN:
-		zone_drain(zone, arg);
+		zone_reclaim(zone, domain, M_NOWAIT, true);
 		break;
 	case UMA_RECLAIM_DRAIN_CPU:
 		pcpu_cache_drain_safe(zone);
-		zone_drain(zone, arg);
+		zone_reclaim(zone, domain, M_NOWAIT, true);
 		break;
 	default:
 		panic("unhandled reclamation request %d", req);
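
Note: below is a minimal sketch of a consumer of the new flag, modeled on the
mlx5e hunks above. The foo_* names and the malloc-backed import/release
callbacks are hypothetical stand-ins; uma_zcache_create() and the callback
signatures are the existing UMA API from sys/vm/uma.h. Per the uma_core.c
changes, only the global uma_reclaim()/uma_reclaim_domain() passes and
zone_timeout()'s periodic trim skip unmanaged zones; a targeted
uma_zone_reclaim() on the zone itself still drains it.

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

/* Hypothetical item backed by an expensive-to-create device resource. */
struct foo_tag {
	uint32_t id;
};

/* Import up to cnt items into the cache zone; return the number imported. */
static int
foo_tag_import(void *arg __unused, void **store, int cnt, int domain __unused,
    int flags)
{
	int i;

	for (i = 0; i < cnt; i++) {
		store[i] = malloc(sizeof(struct foo_tag), M_DEVBUF,
		    flags | M_ZERO);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

/* Release items previously handed out by foo_tag_import(). */
static void
foo_tag_release(void *arg __unused, void **store, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		free(store[i], M_DEVBUF);
}

static uma_zone_t foo_tag_zone;

static void
foo_tag_zone_init(void)
{
	/*
	 * UMA_ZONE_UNMANAGED exempts the zone from zone_timeout() trimming
	 * and from the uma_reclaim()/uma_reclaim_domain() paths patched
	 * above, so cached items survive memory pressure.  The zone can
	 * still be drained explicitly with
	 * uma_zone_reclaim(foo_tag_zone, UMA_RECLAIM_DRAIN).
	 */
	foo_tag_zone = uma_zcache_create("foo_tags",
	    sizeof(struct foo_tag), NULL, NULL, NULL, NULL,
	    foo_tag_import, foo_tag_release, NULL, UMA_ZONE_UNMANAGED);
}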