D22586.id65086.diff

Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -4929,6 +4929,7 @@
vm/swap_pager.c standard
vm/uma_core.c standard
vm/uma_dbg.c standard
+vm/uma_smr.c standard
vm/memguard.c optional DEBUG_MEMGUARD
vm/vm_domainset.c standard
vm/vm_fault.c standard
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -273,6 +273,17 @@
* effort first-touch policy.
*/
#define UMA_ZONE_MINBUCKET 0x20000 /* Use smallest buckets. */
+#define UMA_ZONE_SMR 0x40000 /*
+ * Safe memory reclamation defers
+ * frees until all read sections
+ * have exited. This flag creates
+ * a unique SMR context for this
+ * zone. To share contexts see
+ * uma_zone_set_smr() below.
+ *
+ * See sys/vm/uma_smr.h for more
+ * details.
+ */
/*
* These flags are shared between the keg and zone. In zones wishing to add
@@ -281,7 +292,8 @@
*/
#define UMA_ZONE_INHERIT \
(UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE | \
- UMA_ZONE_HASH | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU | UMA_ZONE_NUMA)
+ UMA_ZONE_HASH | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU | UMA_ZONE_NUMA | \
+ UMA_ZONE_SMR)
/* Definitions for align */
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
@@ -608,6 +620,17 @@
void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
+/*
+ * Associate a zone with an SMR context that is allocated after zone
+ * creation so that multiple zones may share the same context.
+ */
+
+struct uma_smr;
+typedef struct uma_smr * uma_smr_t;
+
+void uma_zone_set_smr(uma_zone_t zone, uma_smr_t smr);
+uma_smr_t uma_zone_get_smr(uma_zone_t zone);
+
/*
* These flags are setable in the allocf and visible in the freef.
*/
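
A minimal consumer sketch of the interface added above, not part of the patch itself: the zone names, item structures, and the init function are hypothetical and assume only uma_zcreate(), the UMA_ZONE_SMR flag, and the two accessors declared in this hunk.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/uma.h>

struct foo { int f_val; };
struct bar { int b_val; };

static uma_zone_t foo_zone, bar_zone;
static uma_smr_t foo_smr;

static void
foo_bar_zones_init(void)
{

	/* UMA_ZONE_SMR gives foo_zone a private SMR context. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);

	/* Retrieve that context and share it with a second zone. */
	foo_smr = uma_zone_get_smr(foo_zone);
	bar_zone = uma_zcreate("bar", sizeof(struct bar), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_smr(bar_zone, foo_smr);
}

Frees to either zone are then deferred against the same shared epoch state.
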
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -91,6 +91,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
+#include <vm/uma_smr.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
@@ -419,11 +420,9 @@
/*
* This is to stop us from allocating per cpu buckets while we're
- * running out of vm.boot_pages. Otherwise, we would exhaust the
- * boot pages. This also prevents us from allocating buckets in
- * low memory situations.
+ * allocating from vm.boot_pages.
*/
- if (bucketdisable)
+ if (__predict_false(booted != BOOT_RUNNING))
return (NULL);
/*
* To limit bucket recursion we store the original zone flags
@@ -454,6 +453,8 @@
#endif
bucket->ub_cnt = 0;
bucket->ub_entries = ubz->ubz_entries;
+ CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
+ zone->uz_name, zone, bucket);
}
return (bucket);
@@ -493,6 +494,11 @@
ZONE_LOCK_ASSERT(zone);
if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_epoch) {
+ if (!uma_smr_poll(zone->uz_smr, bucket->ub_epoch, false))
+ return (NULL);
+ bucket->ub_epoch = 0;
+ }
MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
zdom->uzd_nitems -= bucket->ub_cnt;
@@ -517,10 +523,7 @@
KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
("%s: zone %p overflow", __func__, zone));
- if (ws)
- TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
- else
- TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
+ TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
zdom->uzd_nitems += bucket->ub_cnt;
if (ws && zdom->uzd_imax < zdom->uzd_nitems)
zdom->uzd_imax = zdom->uzd_nitems;
@@ -764,10 +767,15 @@
if (bucket == NULL)
return;
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_epoch)
+ uma_smr_wait(zone->uz_smr, bucket->ub_epoch);
if (zone->uz_fini)
for (i = 0; i < bucket->ub_cnt; i++)
zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
+#ifdef INVARIANTS
+ bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
+#endif
if (zone->uz_max_items > 0) {
ZONE_LOCK(zone);
zone->uz_items -= bucket->ub_cnt;
@@ -868,6 +876,14 @@
b1 = cache->uc_allocbucket;
cache->uc_allocbucket = NULL;
}
+
+ /*
+ * Don't flush SMR zone buckets. This leaves the zone without a
+ * bucket and forces every free to synchronize().
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
+ goto out;
+
if (cache->uc_freebucket) {
if (cache->uc_freebucket->ub_cnt != 0)
zone_put_bucket(zone, &zone->uz_domain[domain],
@@ -878,6 +894,7 @@
}
b3 = cache->uc_crossbucket;
cache->uc_crossbucket = NULL;
+out:
critical_exit();
ZONE_UNLOCK(zone);
if (b1)
@@ -2070,6 +2087,8 @@
ZONE_LOCK(zone);
LIST_FOREACH(z, &keg->uk_zones, uz_link) {
if (LIST_NEXT(z, uz_link) == NULL) {
+ /* Keg zones share smr state. */
+ zone->uz_smr = z->uz_smr;
LIST_INSERT_AFTER(z, zone, uz_link);
break;
}
@@ -2113,6 +2132,11 @@
zone->uz_fails = EARLY_COUNTER;
}
+ /* Caller requests a private SMR context. */
+ if ((zone->uz_flags & (UMA_ZONE_SMR | UMA_ZONE_SECONDARY)) ==
+ UMA_ZONE_SMR)
+ zone->uz_smr = uma_smr_create(zone->uz_name);
+
KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
(UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
("Invalid zone flag combination"));
@@ -2375,6 +2399,8 @@
printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
#endif
booted = BOOT_PAGEALLOC;
+
+ uma_smr_init();
}
void
@@ -2784,9 +2810,13 @@
/*
* If we have run out of items in our alloc bucket see
* if we can switch with the free bucket.
+ *
+ * SMR zones can't reuse the free bucket until the epoch has
+ * expired.
*/
bucket = cache->uc_freebucket;
- if (bucket != NULL && bucket->ub_cnt != 0) {
+ if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && bucket != NULL &&
+ bucket->ub_cnt != 0) {
cache->uc_freebucket = cache->uc_allocbucket;
cache->uc_allocbucket = bucket;
return (true);
@@ -2863,8 +2893,6 @@
* Fill a bucket and attempt to use it as the alloc bucket.
*/
bucket = zone_alloc_bucket(zone, udata, domain, flags);
- CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
- zone->uz_name, zone, bucket);
critical_enter();
if (bucket == NULL)
return (false);
@@ -3346,7 +3374,6 @@
do {
cpu = curcpu;
cache = &zone->uz_cpu[cpu];
- bucket = cache->uc_allocbucket;
#ifdef UMA_XDOMAIN
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
@@ -3357,13 +3384,17 @@
bucket = cache->uc_crossbucket;
} else
#endif
-
/*
* Try to free into the allocbucket first to give LIFO ordering
* for cache-hot datastructures. Spill over into the freebucket
* if necessary. Alloc will swap them if one runs dry.
*/
- if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
+ if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
+ bucket = cache->uc_allocbucket;
+ if (bucket == NULL ||
+ bucket->ub_cnt >= bucket->ub_entries)
+ bucket = cache->uc_freebucket;
+ } else
bucket = cache->uc_freebucket;
if (__predict_true(bucket != NULL &&
bucket->ub_cnt < bucket->ub_entries)) {
@@ -3445,16 +3476,17 @@
cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
int itemdomain)
{
- uma_bucket_t bucket;
+ uma_bucket_t bucket, newbucket;
int cpu, domain;
CRITICAL_ASSERT(curthread);
- if (zone->uz_bucket_size == 0 || bucketdisable)
+ if (zone->uz_bucket_size == 0)
return false;
cpu = curcpu;
cache = &zone->uz_cpu[cpu];
+ newbucket = NULL;
/*
* NUMA domains need to free to the correct zdom. When XDOMAIN
@@ -3486,14 +3518,29 @@
/* We are no longer associated with this CPU. */
critical_exit();
+ /*
+ * Don't let SMR zones operate without a free bucket. Force
+ * a synchronize and re-use this one. We will only degrade
+ * to a synchronize every bucket_size items rather than every
+ * item if we fail to allocate a bucket.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
+ if (bucket != NULL)
+ bucket->ub_epoch = uma_smr_advance(zone->uz_smr);
+ newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+ if (newbucket == NULL && bucket != NULL) {
+ bucket_drain(zone, bucket);
+ newbucket = bucket;
+ bucket = NULL;
+ }
+ } else if (!bucketdisable)
+ newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+
if (bucket != NULL)
zone_free_bucket(zone, bucket, udata, domain, itemdomain);
- bucket = bucket_alloc(zone, udata, M_NOWAIT);
- CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
- zone->uz_name, zone, bucket);
critical_enter();
- if (bucket == NULL)
+ if ((bucket = newbucket) == NULL)
return (false);
cpu = curcpu;
cache = &zone->uz_cpu[cpu];
@@ -3615,6 +3662,15 @@
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{
+ /*
+ * If a free is sent directly to an SMR zone we have to
+ * synchronize immediately because the item can instantly
+ * be reallocated. This should only happen in degenerate
+ * cases when no memory is available for per-cpu caches.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_DTOR)
+ uma_smr_synchronize(zone->uz_smr);
+
item_dtor(zone, item, udata, skip);
if (skip < SKIP_FINI && zone->uz_fini)
@@ -3870,6 +3926,29 @@
KEG_UNLOCK(keg);
}
+/* See uma.h */
+void
+uma_zone_set_smr(uma_zone_t zone, uma_smr_t smr)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ zone->uz_flags |= UMA_ZONE_SMR;
+ zone->uz_smr = smr;
+ ZONE_UNLOCK(zone);
+ KEG_GET(zone, keg);
+ KEG_LOCK(keg);
+ keg->uk_flags |= UMA_ZONE_SMR;
+ KEG_UNLOCK(keg);
+}
+
+uma_smr_t
+uma_zone_get_smr(uma_zone_t zone)
+{
+
+ return (zone->uz_smr);
+}
+
/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -182,6 +182,7 @@
TAILQ_ENTRY(uma_bucket) ub_link; /* Link into the zone */
int16_t ub_cnt; /* Count of items in bucket. */
int16_t ub_entries; /* Max items. */
+ uint32_t ub_epoch; /* epoch value for SMR. */
void *ub_bucket[]; /* actual allocation storage */
};
@@ -415,6 +416,8 @@
struct sysctl_oid *uz_oid; /* sysctl oid pointer. */
int uz_namecnt; /* duplicate name count. */
+ uma_smr_t uz_smr; /* Safe memory reclaim context. */
+
/*
* This HAS to be the last item because we adjust the zone size
* based on NCPU and then allocate the space for the zones.
Index: sys/vm/uma_smr.h
===================================================================
--- /dev/null
+++ sys/vm/uma_smr.h
@@ -0,0 +1,132 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Jeffrey Roberson <jeff@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _VM_UMA_SMR_H_
+#define _VM_UMA_SMR_H_
+
+typedef uint32_t uma_epoch_t;
+
+/*
+ * Safe memory reclamation. See uma_smr.c for a description of the
+ * algorithm.
+ *
+ * Readers synchronize with uma_smr_enter()/exit() and writers may
+ * either free directly to an SMR UMA zone or use uma_smr_synchronize()
+ * or uma_smr_wait().
+ */
+
+/* Global SMR state. */
+struct uma_smr_g {
+ const char *smrg_name; /* Name for debugging/reporting. */
+ volatile uma_epoch_t smrg_epoch; /* Newest global epoch value. */
+ volatile uma_epoch_t smrg_epoch_min; /* Minimum observed epoch. */
+};
+typedef struct uma_smr_g * uma_smr_g_t;
+
+/* Per-cpu SMR state. */
+struct uma_smr {
+ volatile uma_epoch_t smr_epoch; /* Last epoch value. */
+ uma_smr_g_t smr_global; /* Global SMR state. */
+};
+
+/*
+ * Enter a read section.
+ */
+static inline void
+uma_smr_enter(uma_smr_t smr)
+{
+ uma_smr_g_t smrg;
+
+ critical_enter();
+ smr = zpcpu_get(smr);
+ smrg = smr->smr_global;
+ KASSERT(smr->smr_epoch == 0,
+ ("uma_smr_enter(%s) does not support recursion.",
+ smrg->smrg_name));
+
+ /*
+ * Record the current global epoch with an acquire barrier to
+ * synchronize with uma_smr_exit() and uma_smr_wait(). The add
+ * acts as a store because smr_epoch is known to be zero here.
+ */
+ atomic_add_acq_int(&smr->smr_epoch, smrg->smrg_epoch);
+}
+
+/*
+ * Exit a safe memory reclamation section.
+ */
+static inline void
+uma_smr_exit(uma_smr_t smr)
+{
+ uma_smr_g_t smrg;
+
+ smr = zpcpu_get(smr);
+ smrg = smr->smr_global;
+ CRITICAL_ASSERT(curthread);
+ KASSERT(smr->smr_epoch != 0,
+ ("uma_smr_exit(%s) not in a smr section.", smrg->smrg_name));
+
+ /*
+ * Release the read section acquired in uma_smr_enter() and retire
+ * memory references accessed in the recorded epoch.
+ */
+ atomic_store_rel_int(&smr->smr_epoch, 0);
+ critical_exit();
+}
+
+/*
+ * Synchronize returns when all readers have observed the current
+ * epoch.
+ */
+void uma_smr_synchronize(uma_smr_t smr);
+
+/*
+ * Advances the epoch to indicate a write. Returns the goal epoch
+ * required to ensure that all modifications are visible.
+ */
+uma_epoch_t uma_smr_advance(uma_smr_t smr);
+
+/*
+ * Returns true if a goal epoch has been reached. If
+ * wait is true it will busy loop until success.
+ */
+bool uma_smr_poll(uma_smr_t smr, uma_epoch_t goal, bool wait);
+
+static inline bool
+uma_smr_wait(uma_smr_t smr, uma_epoch_t goal)
+{
+
+ return (uma_smr_poll(smr, goal, true));
+}
+
+/* Create a new SMR context. */
+uma_smr_t uma_smr_create(const char *name);
+void uma_smr_destroy(uma_smr_t smr);
+
+/* Only at startup. */
+void uma_smr_init(void);
+
+#endif /* _VM_UMA_SMR_H_ */
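
The read-side protocol above can be illustrated with a hedged, stand-alone sketch (not part of the patch): a lockless list lookup bracketed by uma_smr_enter()/uma_smr_exit(), with a writer unlinking an item and freeing it to an SMR zone so reuse is deferred until readers drain. foo_zone, foo_smr, the list head, and the writer's external locking are assumed.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/uma.h>
#include <vm/uma_smr.h>

struct foo {
	struct foo * volatile f_next;
	int f_key;
};

static struct foo * volatile foo_head;
extern uma_zone_t foo_zone;	/* Created with UMA_ZONE_SMR. */
extern uma_smr_t foo_smr;	/* uma_zone_get_smr(foo_zone). */

static bool
foo_lookup(int key)
{
	struct foo *fp;
	bool found;

	found = false;
	uma_smr_enter(foo_smr);
	for (fp = foo_head; fp != NULL; fp = fp->f_next) {
		if (fp->f_key == key) {
			found = true;
			break;
		}
	}
	uma_smr_exit(foo_smr);
	return (found);
}

static void
foo_remove_head(void)
{
	struct foo *fp;

	/* Writers are assumed to serialize against each other. */
	fp = foo_head;
	if (fp == NULL)
		return;
	foo_head = fp->f_next;
	/* Reuse of fp is deferred until current readers have exited. */
	uma_zfree(foo_zone, fp);
}
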
Index: sys/vm/uma_smr.c
===================================================================
--- /dev/null
+++ sys/vm/uma_smr.c
@@ -0,0 +1,295 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Jeffrey Roberson <jeff@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <vm/uma.h>
+#include <vm/uma_smr.h>
+
+/*
+ * This is a novel safe memory reclamation technique inspired by
+ * epoch based reclamation from Samy Al Bahra's concurrency kit which
+ * in turn was based on work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ *
+ * This is not an implementation of hazard pointers or related
+ * techniques. The term safe memory reclamation is used as a
+ * generic descriptor.
+ *
+ * This approach could be called 'unbounded epoch' because we
+ * do not maintain the invariant that only 2 epochs are active.
+ * This tradeoff allows us to decouple advancing the epoch from
+ * waiting for threads to observe the epoch, which further allows
+ * deferring the expiration check until long after it is likely
+ * to require busy loops; many epochs may be batched and expired
+ * by a single scan. Waits for expired epochs may return immediately.
+ *
+ * This implements a two handed clock approach to epoch values. One
+ * hand, the global epoch, is incremented every time a write changes
+ * the state of the system. Another hand, min_epoch, keeps track of
+ * the oldest write that was observed by any cpu. Goal epochs that
+ * fall outside of this range have expired and wrapped. Special
+ * consideration is given to safely handle wrapping and stale epoch
+ * values.
+ *
+ * Writers that can naturally advance the epoch and wait for it to
+ * expire sometime later will batch together in a single wait. If
+ * this time exceeds the uma_smr_enter()/exit() section time the scan will
+ * never spinwait, allowing effective overlapping of latencies and
+ * relatively few traversals.
+ *
+ * By integrating with the allocator we avoid all of the callout
+ * queue machinery and are provided with an efficient way to batch
+ * epoch advancement and waiting. The allocator accumulates a full
+ * per-cpu cache of memory before advancing the epoch. It then
+ * delays waiting for this epoch to expire until the memory is
+ * selected for reuse. In this way we only increment the epoch
+ * value every 1/(cache size) frees and the waits are done long
+ * after the epoch has been expired so they need only be verified
+ * to account for pathological conditions.
+ *
+ * If the read overhead of accessing the global cacheline becomes
+ * burdensome an invariant TSC could be used in place of the
+ * epoch. The algorithm would then only need to maintain the minimum
+ * observed tsc. This would trade potential cache synchronization
+ * overhead for local serialization and computational expense.
+ */
+uma_zone_t uma_smr_g_zone;
+uma_zone_t uma_smr_zone;
+
+/*
+ * Modular arithmetic for comparing epoch numbers that have
+ * potentially wrapped. Copied from tcp_seq.h.
+ */
+#define EPOCH_LT(a, b) ((int)((a)-(b)) < 0)
+#define EPOCH_LEQ(a, b) ((int)((a)-(b)) <= 0)
+#define EPOCH_GT(a, b) ((int)((a)-(b)) > 0)
+#define EPOCH_GEQ(a, b) ((int)((a)-(b)) >= 0)
+
+#ifdef INVARIANTS
+/* We want to test the wrapping feature in invariants kernels. */
+#define EPOCH_INCR (UINT_MAX / 10000)
+#define EPOCH_INIT (UINT_MAX - 100000)
+#else
+#define EPOCH_INIT 1
+#define EPOCH_INCR 2
+#endif
+
+/*
+ * Advance the global epoch and return the new value for use as the
+ * wait goal. This guarantees that any changes made by the calling
+ * thread prior to this call will be visible to all threads after
+ * epoch_min meets or exceeds the return value.
+ */
+uma_epoch_t
+uma_smr_advance(uma_smr_t smr)
+{
+ uma_smr_g_t g;
+ uma_epoch_t goal;
+
+ /*
+ * Modifications not done in an SMR section need to be visible
+ * before advancing the epoch.
+ */
+ atomic_thread_fence_rel();
+
+ /*
+ * Increment the global epoch value by EPOCH_INCR. Since the epoch
+ * is initialized to an odd value and advanced by an even increment,
+ * the only valid epoch values are odd and an observed value of 0 in
+ * a particular CPU means it is not currently in an epoch section.
+ */
+ g = smr->smr_global;
+ goal = atomic_fetchadd_int(&g->smrg_epoch, EPOCH_INCR) + EPOCH_INCR;
+
+ /*
+ * Force a synchronization here if the goal is getting too
+ * far ahead of the minimum observed epoch. This keeps the
+ * wrap detecting arithmetic working in pathological cases.
+ */
+ if (goal - g->smrg_epoch_min >= INT_MAX / 2)
+ uma_smr_wait(smr, goal);
+
+ return (goal);
+}
+
+/*
+ * Loop updating epoch_min if it is below goal. If wait is true
+ * this will spin until the goal is met.
+ *
+ * Returns true if the goal is met and false if not.
+ */
+bool
+uma_smr_poll(uma_smr_t smr, uma_epoch_t goal, bool wait)
+{
+ uma_smr_g_t g;
+ uma_smr_t c;
+ uma_epoch_t epoch, min, e;
+ int i;
+ bool success;
+
+ /*
+ * Use a critical section so that we can avoid ABA races
+ * caused by long preemption sleeps.
+ */
+ critical_enter();
+ g = smr->smr_global;
+
+ /*
+ * Acquire barrier loads epoch after min so that we can not
+ * observe an updated minimum that is larger than the epoch.
+ */
+ min = atomic_load_acq_int(&g->smrg_epoch_min);
+
+ /*
+ * Load epoch prior to reading any of the per-cpu epochs to prevent
+ * stale comparisons.
+ */
+ epoch = atomic_load_acq_int(&g->smrg_epoch);
+
+ /*
+ * Detect whether the goal epoch has already been observed.
+ *
+ * The goal must be in the range of epoch >= goal >= epoch_min for
+ * it to be valid. If it is not then the caller held on to it and
+ * the integer wrapped. If we wrapped back within range the caller
+ * will harmlessly scan.
+ */
+ if (EPOCH_GEQ(min, goal) || EPOCH_LT(epoch, goal)) {
+ critical_exit();
+ return (true);
+ }
+
+ /*
+ * Loop until all cores have observed the goal epoch
+ * or have gone inactive. Keep track of the oldest
+ * epoch currently active.
+ */
+ success = true;
+ min = epoch;
+ CPU_FOREACH(i) {
+ c = zpcpu_get_cpu(smr, i);
+ for (;;) {
+ e = c->smr_epoch;
+ if (e == 0)
+ break;
+
+ /*
+ * Check to see if another thread has
+ * advanced the epoch while being careful
+ * to handle integer wrapping.
+ */
+ if (EPOCH_GEQ(e, goal)) {
+ if (EPOCH_GT(min, e))
+ min = e;
+ break;
+ }
+ if (!wait) {
+ success = false;
+ break;
+ }
+ cpu_spinwait();
+ }
+ }
+
+ /*
+ * Advance the min observed epoch as long as we observed the most
+ * recent one.
+ */
+ epoch = g->smrg_epoch_min;
+ do {
+ if (EPOCH_LEQ(min, epoch))
+ break;
+ } while (atomic_fcmpset_int(&g->smrg_epoch_min, &epoch, min) == 0);
+ critical_exit();
+
+ return (success);
+}
+
+void
+uma_smr_synchronize(uma_smr_t smr)
+{
+ uma_epoch_t goal;
+
+ goal = uma_smr_advance(smr);
+ uma_smr_poll(smr, goal, true);
+}
+
+uma_smr_t
+uma_smr_create(const char *name)
+{
+ uma_smr_t smr, c;
+ uma_smr_g_t g;
+ int i;
+
+ g = uma_zalloc(uma_smr_g_zone, M_WAITOK);
+ smr = uma_zalloc(uma_smr_zone, M_WAITOK);
+
+ g->smrg_name = name;
+ g->smrg_epoch_min = g->smrg_epoch = EPOCH_INIT;
+ /* Initialize all CPUs, not just those running. */
+ for (i = 0; i <= mp_maxid; i++) {
+ c = zpcpu_get_cpu(smr, i);
+ c->smr_epoch = 0;
+ c->smr_global = g;
+ }
+ atomic_thread_fence_seq_cst();
+
+ return (smr);
+}
+
+void
+uma_smr_destroy(uma_smr_t smr)
+{
+
+ uma_smr_synchronize(smr);
+ uma_zfree(uma_smr_g_zone, smr->smr_global);
+ uma_zfree(uma_smr_zone, smr);
+}
+
+/*
+ * Initialize the zones used to allocate SMR contexts.
+ */
+void
+uma_smr_init(void)
+{
+
+ uma_smr_g_zone = uma_zcreate("SMR GLOBAL", sizeof(struct uma_smr_g),
+ NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
+ uma_smr_zone = uma_zcreate("SMR CPU", sizeof(struct uma_smr),
+ NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
+}
