Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -4942,6 +4942,7 @@
 vm/swap_pager.c			standard
 vm/uma_core.c			standard
 vm/uma_dbg.c			standard
+vm/uma_smr.c			standard
 vm/memguard.c			optional DEBUG_MEMGUARD
 vm/vm_domainset.c		standard
 vm/vm_fault.c			standard
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -263,6 +263,17 @@
  */
 #define	UMA_ZONE_FIRSTTOUCH	0x10000	/* First touch NUMA policy */
 #define	UMA_ZONE_ROUNDROBIN	0x20000	/* Round-robin NUMA policy. */
+#define	UMA_ZONE_SMR		0x40000	/*
+					 * Safe memory reclamation defers
+					 * frees until all read sections
+					 * have exited.  This flag creates
+					 * a unique SMR context for this
+					 * zone.  To share contexts see
+					 * uma_zone_set_smr() below.
+					 *
+					 * See sys/vm/uma_smr.h for more
+					 * details.
+					 */
 /* In use by UMA_ZFLAGs:	0xffe00000 */

 /*
@@ -273,7 +284,7 @@
 #define	UMA_ZONE_INHERIT						\
 	(UMA_ZONE_NOTOUCH | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE |	\
 	UMA_ZONE_NOTPAGE | UMA_ZONE_PCPU | UMA_ZONE_FIRSTTOUCH |	\
-	UMA_ZONE_ROUNDROBIN)
+	UMA_ZONE_ROUNDROBIN | UMA_ZONE_SMR)

 /* Definitions for align */
 #define	UMA_ALIGN_PTR	(sizeof(void *) - 1)	/* Alignment fit for ptr */
@@ -600,6 +611,17 @@
 void uma_zone_set_freef(uma_zone_t zone, uma_free freef);

+/*
+ * Associate a zone with an SMR context that is allocated after creation
+ * so that multiple zones may share the same context.
+ */
+
+struct uma_smr;
+typedef struct uma_smr * uma_smr_t;
+
+void uma_zone_set_smr(uma_zone_t zone, uma_smr_t smr);
+uma_smr_t uma_zone_get_smr(uma_zone_t zone);
+
 /*
  * These flags are setable in the allocf and visible in the freef.
  */
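UMA_ZONE_SMR and uma_zone_set_smr()/uma_zone_get_smr() above are the whole consumer-facing surface added to uma.h. A minimal usage sketch, not part of the patch, assuming hypothetical "foo"/"bar" zones and item types: passing UMA_ZONE_SMR at creation gives a zone its own context, and a second zone can adopt that context before first use.

/*
 * Sketch only, not part of the patch: "foo"/"bar" and their structs are
 * hypothetical.  foo_zone gets a private SMR context from UMA_ZONE_SMR;
 * bar_zone shares it by copying the context before either zone is used
 * (uma_zone_set_smr() asserts the zone is still cold).
 */
#include <vm/uma.h>

struct foo { int f_key; };
struct bar { int b_key; };

static uma_zone_t foo_zone, bar_zone;

static void
foo_bar_zinit(void)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	bar_zone = uma_zcreate("bar", sizeof(struct bar), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Both zones now defer frees behind the same set of read sections. */
	uma_zone_set_smr(bar_zone, uma_zone_get_smr(foo_zone));
}

Keg-secondary zones get the same sharing automatically in zone_ctor() below; uma_zone_set_smr() extends it to unrelated zones.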
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -92,6 +92,7 @@
 #include 
 #include 
 #include 
+#include <vm/uma_smr.h>
 #include 
 #include 
@@ -454,9 +455,9 @@
 	uma_bucket_t bucket;

 	/*
-	 * Don't allocate buckets in low memory situations.
+	 * Don't allocate buckets early in boot.
 	 */
-	if (bucketdisable)
+	if (__predict_false(booted < BOOT_KVA))
 		return (NULL);

 	/*
@@ -488,6 +489,8 @@
 #endif
 		bucket->ub_cnt = 0;
 		bucket->ub_entries = ubz->ubz_entries;
+		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
+		    zone->uz_name, zone, bucket);
 	}

 	return (bucket);
@@ -527,6 +530,11 @@
 	ZONE_LOCK_ASSERT(zone);

 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
+		if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_epoch) {
+			if (!uma_smr_poll(zone->uz_smr, bucket->ub_epoch, false))
+				return (NULL);
+			bucket->ub_epoch = 0;
+		}
 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
 		zdom->uzd_nitems -= bucket->ub_cnt;
@@ -551,10 +559,7 @@
 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
 	    ("%s: zone %p overflow", __func__, zone));

-	if (ws)
-		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
-	else
-		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
+	TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
 	zdom->uzd_nitems += bucket->ub_cnt;
 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
 		zdom->uzd_imax = zdom->uzd_nitems;
@@ -941,12 +946,17 @@
 	if (bucket == NULL || bucket->ub_cnt == 0)
 		return;

+	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_epoch)
+		uma_smr_wait(zone->uz_smr, bucket->ub_epoch);
 	if (zone->uz_fini)
 		for (i = 0; i < bucket->ub_cnt; i++)
 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
 	if (zone->uz_max_items > 0)
 		zone_free_limit(zone, bucket->ub_cnt);
+#ifdef INVARIANTS
+	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
+#endif
 	bucket->ub_cnt = 0;
 }
@@ -1035,12 +1045,21 @@
 		zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
 		b1 = NULL;
 	}
+
+	/*
+	 * Don't flush SMR zone buckets.  This leaves the zone without a
+	 * bucket and forces every free to synchronize().
+	 */
+	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
+		goto out;
 	b2 = cache_bucket_unload_free(cache);
 	if (b2 != NULL && b2->ub_cnt != 0) {
 		zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
 		b2 = NULL;
 	}
 	b3 = cache_bucket_unload_cross(cache);
+
+out:
 	critical_exit();
 	ZONE_UNLOCK(zone);
 	if (b1)
@@ -2356,6 +2375,8 @@
 		ZONE_LOCK(zone);
 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 			if (LIST_NEXT(z, uz_link) == NULL) {
+				/* Keg zones share SMR state. */
+				zone->uz_smr = z->uz_smr;
 				LIST_INSERT_AFTER(z, zone, uz_link);
 				break;
 			}
@@ -2399,6 +2420,11 @@
 		zone->uz_fails = EARLY_COUNTER;
 	}

+	/* Caller requests a private SMR context. */
+	if ((zone->uz_flags & (UMA_ZONE_SMR | UMA_ZONE_SECONDARY)) ==
+	    UMA_ZONE_SMR)
+		zone->uz_smr = uma_smr_create(zone->uz_name);
+
 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
 	    ("Invalid zone flag combination"));
@@ -2600,6 +2626,7 @@
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 	bucket_init();
+	uma_smr_init();
 }

 #ifndef UMA_MD_SMALL_ALLOC
@@ -3014,9 +3041,14 @@
 	/*
 	 * If we have run out of items in our alloc bucket see
 	 * if we can switch with the free bucket.
+	 *
+	 * SMR zones can't re-use the free bucket until the epoch has
+	 * expired.
 	 */
-	if (cache->uc_freebucket.ucb_cnt != 0) {
-		cache_bucket_swap(&cache->uc_freebucket, &cache->uc_allocbucket);
+	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 &&
+	    cache->uc_freebucket.ucb_cnt != 0) {
+		cache_bucket_swap(&cache->uc_freebucket,
+		    &cache->uc_allocbucket);
 		return (true);
 	}
@@ -3697,6 +3729,13 @@
 	critical_enter();
 	do {
 		cache = &zone->uz_cpu[curcpu];
+		/*
+		 * Try to free into the allocbucket first to give LIFO
+		 * ordering for cache-hot datastructures.  Spill over
+		 * into the freebucket if necessary.  Alloc will swap
+		 * them if one runs dry.
+		 */
+		bucket = &cache->uc_allocbucket;
 #ifdef NUMA
 		domain = PCPU_GET(domain);
 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
@@ -3704,18 +3743,13 @@
 			bucket = &cache->uc_crossbucket;
 		} else
 #endif
-		{
-			/*
-			 * Try to free into the allocbucket first to give LIFO
-			 * ordering for cache-hot datastructures.  Spill over
-			 * into the freebucket if necessary.  Alloc will swap
-			 * them if one runs dry.
-			 */
-			bucket = &cache->uc_allocbucket;
-			if (__predict_false(bucket->ucb_cnt >=
-			    bucket->ucb_entries))
-				bucket = &cache->uc_freebucket;
-		}
+		/*
+		 * If this is an SMR zone or the allocbucket is empty we use
+		 * the free bucket.
+		 */
+		if (__predict_false((uz_flags & UMA_ZONE_SMR) != 0 ||
+		    bucket->ucb_cnt >= bucket->ucb_entries))
+			bucket = &cache->uc_freebucket;
 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
 			cache_bucket_push(cache, bucket, item);
 			critical_exit();
@@ -3862,15 +3896,16 @@
     int itemdomain)
 {
 	uma_cache_bucket_t cbucket;
-	uma_bucket_t bucket;
+	uma_bucket_t newbucket, bucket;
 	int domain;

 	CRITICAL_ASSERT(curthread);

-	if (zone->uz_bucket_size == 0 || bucketdisable)
+	if (zone->uz_bucket_size == 0)
 		return false;

 	cache = &zone->uz_cpu[curcpu];
+	newbucket = NULL;

 	/*
 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
@@ -3895,14 +3930,29 @@
 	/* We are no longer associated with this CPU. */
 	critical_exit();

+	/*
+	 * Don't let SMR zones operate without a free bucket.  Force
+	 * a synchronize and re-use this one.  We will only degrade
+	 * to a synchronize every bucket_size items rather than every
+	 * item if we fail to allocate a bucket.
+	 */
+	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
+		if (bucket != NULL)
+			bucket->ub_epoch = uma_smr_advance(zone->uz_smr);
+		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+		if (newbucket == NULL && bucket != NULL) {
+			bucket_drain(zone, bucket);
+			newbucket = bucket;
+			bucket = NULL;
+		}
+	} else if (!bucketdisable)
+		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+
 	if (bucket != NULL)
 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);

-	bucket = bucket_alloc(zone, udata, M_NOWAIT);
-	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
-	    zone->uz_name, zone, bucket);
 	critical_enter();
-	if (bucket == NULL)
+	if ((bucket = newbucket) == NULL)
 		return (false);
 	cache = &zone->uz_cpu[curcpu];
 #ifdef NUMA
@@ -4031,6 +4081,15 @@
 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
 {

+	/*
+	 * If a free is sent directly to an SMR zone we have to
+	 * synchronize immediately because the item can instantly
+	 * be reallocated.  This should only happen in degenerate
+	 * cases when no memory is available for per-cpu caches.
+	 */
+	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_DTOR)
+		uma_smr_synchronize(zone->uz_smr);
+
 	item_dtor(zone, item, zone->uz_size, udata, skip);

 	if (skip < SKIP_FINI && zone->uz_fini)
@@ -4255,6 +4314,31 @@
 	keg->uk_allocf = allocf;
 }

+/* See uma.h */
+void
+uma_zone_set_smr(uma_zone_t zone, uma_smr_t smr)
+{
+	uma_keg_t keg;
+
+	ZONE_ASSERT_COLD(zone);
+
+	KEG_GET(zone, keg);
+	KEG_ASSERT_COLD(keg);
+	keg->uk_flags |= UMA_ZONE_SMR;
+	ZONE_LOCK(zone);
+	zone->uz_flags |= UMA_ZONE_SMR;
+	zone->uz_smr = smr;
+	zone_update_caches(zone);
+	ZONE_UNLOCK(zone);
+}
+
+uma_smr_t
+uma_zone_get_smr(uma_zone_t zone)
+{
+
+	return (zone->uz_smr);
+}
+
 /* See uma.h */
 void
 uma_zone_reserve(uma_zone_t zone, int items)
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -247,6 +247,7 @@
 	TAILQ_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
 	int16_t		ub_cnt;			/* Count of items in bucket. */
 	int16_t		ub_entries;		/* Max items. */
+	uint32_t	ub_epoch;		/* Epoch value for SMR. */
 	void		*ub_bucket[];		/* actual allocation storage */
 };

@@ -529,6 +530,8 @@
 	struct sysctl_oid	*uz_oid;	/* sysctl oid pointer. */
 	int		uz_namecnt;		/* duplicate name count. */

+	uma_smr_t	uz_smr;			/* Safe memory reclaim context. */
+
 	/*
 	 * This HAS to be the last item because we adjust the zone size
 	 * based on NCPU and then allocate the space for the zones.
Index: sys/vm/uma_smr.h
===================================================================
--- /dev/null
+++ sys/vm/uma_smr.h
@@ -0,0 +1,132 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Jeffrey Roberson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _VM_UMA_SMR_H_
+#define _VM_UMA_SMR_H_
+
+typedef uint32_t uma_epoch_t;
+
+/*
+ * Safe memory reclamation.  See uma_smr.c for a description of the
+ * algorithm.
+ *
+ * Readers synchronize with uma_smr_enter()/exit() and writers may either
+ * free directly to an SMR UMA zone or use uma_smr_synchronize or wait.
+ */
+
+/* Global SMR state. */
+struct uma_smr_g {
+	const char	*smrg_name;		/* Name for debugging/reporting. */
+	volatile uma_epoch_t smrg_epoch;	/* Newest global epoch value. */
+	volatile uma_epoch_t smrg_epoch_min;	/* Minimum observed epoch. */
+};
+typedef struct uma_smr_g * uma_smr_g_t;
+
+/* Per-cpu SMR state. */
+struct uma_smr {
+	volatile uma_epoch_t smr_epoch;		/* Last epoch value. */
+	uma_smr_g_t	smr_global;		/* Global SMR state. */
+};
+
+/*
+ * Enter a read section.
+ */
+static inline void
+uma_smr_enter(uma_smr_t smr)
+{
+	uma_smr_g_t smrg;
+
+	critical_enter();
+	smr = zpcpu_get(smr);
+	smrg = smr->smr_global;
+	KASSERT(smr->smr_epoch == 0,
+	    ("uma_smr_enter(%s) does not support recursion.",
+	    smrg->smrg_name));
+
+	/* Acquire barrier to synchronize with smr_exit and smr_wait(). */
+	atomic_add_acq_int(&smr->smr_epoch, smrg->smrg_epoch);
+}
+
+/*
+ * Exit a safe memory reclamation section.
+ */
+static inline void
+uma_smr_exit(uma_smr_t smr)
+{
+	uma_smr_g_t smrg;
+
+	smr = zpcpu_get(smr);
+	smrg = smr->smr_global;
+	CRITICAL_ASSERT(curthread);
+	KASSERT(smr->smr_epoch != 0,
+	    ("uma_smr_exit(%s) not in a smr section.", smrg->smrg_name));
+
+	/*
+	 * Release pairs with the acquire above and retires memory
+	 * references accessed in the recorded epoch.
+	 */
+	atomic_store_rel_int(&smr->smr_epoch, 0);
+	critical_exit();
+}
+
+/*
+ * Synchronize returns when all readers have observed the current
+ * epoch.
+ */
+void uma_smr_synchronize(uma_smr_t smr);
+
+/*
+ * Advances the epoch to indicate a write.  Returns the goal epoch
+ * required to ensure that all modifications are visible.
+ */
+uma_epoch_t uma_smr_advance(uma_smr_t smr);
+
+/*
+ * Returns true if a goal epoch has been reached.  If
+ * wait is true it will busy loop until success.
+ */
+bool uma_smr_poll(uma_smr_t smr, uma_epoch_t goal, bool wait);
+
+static inline bool
+uma_smr_wait(uma_smr_t smr, uma_epoch_t goal)
+{

+	return (uma_smr_poll(smr, goal, true));
+}
+
+/* Create a new SMR context. */
+uma_smr_t uma_smr_create(const char *name);
+void uma_smr_destroy(uma_smr_t smr);
+
+/* Only at startup. */
+void uma_smr_init(void);
+
+#endif	/* _VM_UMA_SMR_H_ */
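uma_smr_enter()/uma_smr_exit() above are the entire read side. A sketch of the intended reader/writer pairing, not part of the patch, assuming a hypothetical linked-list table, an entry_zone created with UMA_ZONE_SMR, and a writer-side lock that is not shown; the writer never calls uma_smr_synchronize() itself because the SMR zone's free path defers reuse on its behalf.

/*
 * Sketch only: struct entry, entry_zone and the list head are
 * hypothetical, and the writer is assumed to publish list updates with
 * appropriately ordered stores.  Readers may keep using an entry they
 * found even if it is concurrently unlinked, because uma_zfree() to an
 * SMR zone will not let the memory be reused until their sections exit.
 */
#include <sys/queue.h>
#include <vm/uma.h>
#include <vm/uma_smr.h>

struct entry {
	LIST_ENTRY(entry)	e_link;
	int			e_key;
	int			e_val;
};
LIST_HEAD(entry_list, entry);

static uma_zone_t entry_zone;		/* Created with UMA_ZONE_SMR. */

static bool
entry_lookup(struct entry_list *head, int key, int *valp)
{
	struct entry *e;
	bool found;

	found = false;
	uma_smr_enter(uma_zone_get_smr(entry_zone));
	LIST_FOREACH(e, head, e_link) {
		if (e->e_key == key) {
			*valp = e->e_val;
			found = true;
			break;
		}
	}
	uma_smr_exit(uma_zone_get_smr(entry_zone));

	return (found);
}

static void
entry_remove(struct entry *e)
{

	/* Writer lock held by the caller (not shown). */
	LIST_REMOVE(e, e_link);
	uma_zfree(entry_zone, e);
}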
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * This is a novel safe memory reclamation technique inspired by + * epoch based reclamation from Samy Al Bahra's concurrency kit which + * in turn was based on work described in: + * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University + * of Cambridge Computing Laboratory. + * + * This is not an implementation of hazard pointers or related + * techniques. The term safe memory reclamation is used as a + * generic descriptor. + * + * This approach could be called 'unbounded epoch' because we + * do not maintain the invariant that only 2 epochs are active. + * This tradeoff allows us to decouple advancing the epoch from + * waiting for threads to observe the epoch. Which further allows + * deferring the expiration check until long after it is likely + * to require busy loops and many epochs may be batched and expired + * by a single scan. Waits for expired epochs may return immediately. + * + * This implements a two handed clock approach to epoch values. One + * hand, the global epoch, is incremented every time a write changes + * the state of the system. Another hand, min_epoch, keeps track of + * the oldest write that was observed by any cpu. Goal epochs that + * fall outside of this range have expired and wrapped. Special + * consideration is given to safely handle wrapping and stale epoch + * values. + * + * Writers that can naturally advance the epoch and wait for it to + * expire sometime later will batch together in a single wait. If + * this time exceeds the zsmr_enter/exit section time the scan will + * never spinwait, allowing effective overlapping of latencies and + * relatively few traversals. + * + * By integrating with the allocator we avoid all of the callout + * queue machinery and are provided with an efficient way to batch + * epoch advancement and waiting. The allocator accumulates a full + * per-cpu cache of memory before advancing the epoch. It then + * delays waiting for this epoch to expire until the memory is + * selected for reuse. In this way we only increment the epoch + * value every 1/(cache size) frees and the waits are done long + * after the epoch has been expired so they need only be verified + * to account for pathological conditions. + * + * If the read overhead of accessing the global cacheline becomes + * burdensome an invariant TSC could be used in place of the + * epoch. The algorithm would then only need to maintain the minimum + * observed tsc. This would trade potential cache synchronization + * overhead for local serialization and computational expense. + */ +uma_zone_t uma_smr_g_zone; +uma_zone_t uma_smr_zone; + +/* + * Modular arithmetic for comparing epoch numbers that have + * potentially wrapped. Copied from tcp_seq.h. + */ +#define EPOCH_LT(a, b) ((int)((a)-(b)) < 0) +#define EPOCH_LEQ(a, b) ((int)((a)-(b)) <= 0) +#define EPOCH_GT(a, b) ((int)((a)-(b)) > 0) +#define EPOCH_GEQ(a, b) ((int)((a)-(b)) >= 0) + +#ifdef INVARIANTS +#define EPOCH_INIT 1 +#define EPOCH_INCR 2 +#else +/* We want to test the wrapping feature in invariants kernels. */ +#define EPOCH_INCR (UINT_MAX / 10000) +#define EPOCH_INIT (UINT_MAX - 100000) +#endif + +/* + * Advance the global epoch and return the new value for use as the + * wait goal. This guarantees that any changes made by the calling + * thread prior to this call will be visible to all threads after + * epoch_min meets or exceeds the return value. 
+ */ +uma_epoch_t +uma_smr_advance(uma_smr_t smr) +{ + uma_smr_g_t g; + uma_epoch_t goal; + + /* + * Modifications not done in a smr section need to be visible + * before advancing the epoch. + */ + atomic_thread_fence_rel(); + + /* + * Increment the global epoch value by 2. Since the epoch is + * initialized to 1 this means the only valid epoch values are + * odd and an observed value of 0 in a particular CPU means it + * is not currently in an epoch section. + */ + g = smr->smr_global; + goal = atomic_fetchadd_int(&g->smrg_epoch, EPOCH_INCR) + EPOCH_INCR; + + /* + * Force a synchronization here if the goal is getting too + * far ahead of the minimum observed epoch. This keeps the + * wrap detecting arithmetic working in pathological cases. + */ + if (goal - g->smrg_epoch_min == INT_MAX / 2) + uma_smr_wait(smr, goal); + + return (goal); +} + +/* + * Loop updating epoch_min if it is below goal. If wait is true + * this will spin until the goal is met. + * + * Returns true if the goal is met and false if not. + */ +bool +uma_smr_poll(uma_smr_t smr, uma_epoch_t goal, bool wait) +{ + uma_smr_g_t g; + uma_smr_t c; + uma_epoch_t epoch, min, e; + int i; + bool success; + + /* + * Use a critical section so that we can avoid ABA races + * caused by long preemption sleeps. + */ + critical_enter(); + g = smr->smr_global; + + /* + * Acquire barrier loads epoch after min so that we can not + * observe an updated minimum that is larger than the epoch. + */ + min = atomic_load_acq_int(&g->smrg_epoch_min); + + /* + * Load epoch prior to reading any of the per-cpu epochs to prevent + * stale comparisons. + */ + epoch = atomic_load_acq_int(&g->smrg_epoch); + + /* + * Detect whether the goal epoch has already been observed. + * + * The goal must be in the range of epoch >= goal >= epoch_min for + * it to be valid. If it is not then the caller held on to it and + * the integer wrapped. If we wrapped back within range the caller + * will harmlessly scan. + */ + if (EPOCH_GEQ(min, goal) || EPOCH_LT(epoch, goal)) { + critical_exit(); + return (true); + } + + /* + * Loop until all cores have observed the goal epoch + * or have gone inactive. Keep track of the oldest + * epoch currently active. + */ + success = true; + min = epoch; + CPU_FOREACH(i) { + c = zpcpu_get_cpu(smr, i); + for (;;) { + e = smr->smr_epoch; + if (e == 0) + break; + + /* + * Check to see if another thread has + * advanced the epoch while being careful + * to handle integer wrapping. + */ + if (EPOCH_GEQ(e, goal)) { + if (EPOCH_GT(min, e)) + min = e; + break; + } + if (!wait) { + success = false; + break; + } + cpu_spinwait(); + } + } + + /* + * Advance the min observed epoch as long as we observed the most + * recent one. + */ + epoch = g->smrg_epoch_min; + do { + if (EPOCH_LEQ(min, epoch)) + break; + } while (atomic_fcmpset_int(&g->smrg_epoch_min, &epoch, min) == 0); + critical_exit(); + + return (success); +} + +void +uma_smr_synchronize(uma_smr_t smr) +{ + uma_epoch_t goal; + + goal = uma_smr_advance(smr); + uma_smr_poll(smr, goal, true); +} + +uma_smr_t +uma_smr_create(const char *name) +{ + uma_smr_t smr, c; + uma_smr_g_t g; + int i; + + g = uma_zalloc(uma_smr_g_zone, M_WAITOK); + smr = uma_zalloc(uma_smr_zone, M_WAITOK); + + g->smrg_name = name; + g->smrg_epoch_min = g->smrg_epoch = EPOCH_INIT; + /* Initialize all CPUS, not just those running. 
+uma_smr_t
+uma_smr_create(const char *name)
+{
+	uma_smr_t smr, c;
+	uma_smr_g_t g;
+	int i;
+
+	g = uma_zalloc(uma_smr_g_zone, M_WAITOK);
+	smr = uma_zalloc(uma_smr_zone, M_WAITOK);
+
+	g->smrg_name = name;
+	g->smrg_epoch_min = g->smrg_epoch = EPOCH_INIT;
+	/* Initialize all CPUs, not just those running. */
+	for (i = 0; i <= mp_maxid; i++) {
+		c = zpcpu_get_cpu(smr, i);
+		c->smr_epoch = 0;
+		c->smr_global = g;
+	}
+	atomic_thread_fence_seq_cst();
+
+	return (smr);
+}
+
+void
+uma_smr_destroy(uma_smr_t smr)
+{
+
+	uma_smr_synchronize(smr);
+	uma_zfree(uma_smr_g_zone, smr->smr_global);
+	uma_zfree(uma_smr_zone, smr);
+}
+
+/*
+ * Initialize the SMR zones at startup.
+ */
+void
+uma_smr_init(void)
+{
+
+	uma_smr_g_zone = uma_zcreate("SMR GLOBAL", sizeof(struct uma_smr_g),
+	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
+	uma_smr_zone = uma_zcreate("SMR CPU", sizeof(struct uma_smr),
+	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
+}
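Taken together, the file reduces to the two-handed clock described in the big comment: writers advance a global hand, readers publish the epoch they saw on entry, and poll checks whether the slowest published value has passed a goal. A single-threaded userspace model of just that relationship, with the memory barriers and the epoch_min cache elided; it is an illustration only, not part of the patch.

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	NCPU		4
#define	EPOCH_GEQ(a, b)	((int)((a)-(b)) >= 0)

static uint32_t global_epoch = 1;	/* The writer hand. */
static uint32_t cpu_epoch[NCPU];	/* 0 means "not in a read section". */

static uint32_t
advance(void)
{
	return (global_epoch += 2);
}

static bool
poll_goal(uint32_t goal)
{
	int i;

	for (i = 0; i < NCPU; i++)
		if (cpu_epoch[i] != 0 && !EPOCH_GEQ(cpu_epoch[i], goal))
			return (false);	/* A reader entered before the goal. */
	return (true);
}

int
main(void)
{
	uint32_t goal;

	cpu_epoch[2] = global_epoch;	/* CPU 2 enters a read section. */
	goal = advance();		/* A writer retires memory. */
	printf("expired while cpu2 reads: %d\n", poll_goal(goal));	/* 0 */
	cpu_epoch[2] = 0;		/* CPU 2 exits its section. */
	printf("expired after cpu2 exits: %d\n", poll_goal(goal));	/* 1 */
	return (0);
}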