D22586.id67303.diff
D22586: Implement safe memory reclamation in UMA

Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -3848,6 +3848,7 @@
kern/subr_sglist.c standard
kern/subr_sleepqueue.c standard
kern/subr_smp.c standard
+kern/subr_smr.c standard
kern/subr_stack.c optional ddb | stack | ktr
kern/subr_stats.c optional stats
kern/subr_taskqueue.c standard
Index: sys/kern/subr_smr.c
===================================================================
--- /dev/null
+++ sys/kern/subr_smr.c
@@ -0,0 +1,356 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Jeffrey Roberson <jeff@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/smr.h>
+
+#include <vm/uma.h>
+
+/*
+ * This is a novel safe memory reclamation technique inspired by
+ * epoch-based reclamation from Samy Al Bahra's Concurrency Kit, which
+ * in turn was based on work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ *
+ * This is not an implementation of hazard pointers or related
+ * techniques. The term safe memory reclamation is used as a
+ * generic descriptor for algorithms that defer frees to avoid
+ * use-after-free errors with lockless datastructures.
+ *
+ * The basic approach is to maintain a monotonic write sequence
+ * number that is updated on some application defined granularity.
+ * Readers record the most recent write sequence number they have
+ * observed. A global read sequence number records the lowest
+ * sequence number observed by any reader. Any write older than
+ * this value has been observed by all readers and memory can be
+ * reclaimed. Like epoch we also detect idle readers by storing
+ * an invalid sequence number in the per-cpu state when the read
+ * section is exited.
+ *
+ * The write and read sequence numbers can be thought of as a
+ * two-handed clock. SMR maintains the invariant that all readers
+ * can only see state that existed between these two values. Periodically
+ * the read sequence or hand is advanced towards the write sequence
+ * and memory is reclaimed. When the system is idle the two hands
+ * meet. Readers never advance the sequence number and the global
+ * read sequence number is never higher than the write sequence.
+ * A stored sequence number that falls outside of this range has
+ * expired and needs no scan to reclaim.
+ *
+ * A notable distinction between this SMR and epoch, qsbr, rcu, etc. is
+ * that advancing the sequence number is decoupled from detecting its
+ * observation. This results in a more granular assignment of sequence
+ * numbers even when long-running readers delay some or all expiration.
+ * It also allows writers to advance the sequence number and save the
+ * poll for expiration until a later time when it is likely to
+ * complete without waiting. The batch granularity and free-to-use
+ * latency is dynamic and can be significantly smaller than in more
+ * strict systems.
+ *
+ * This mechanism is primarily intended to be used in coordination with
+ * UMA. By integrating with the allocator we avoid all of the callout
+ * queue machinery and are provided with an efficient way to batch
+ * sequence advancement and waiting. The allocator accumulates a full
+ * per-cpu cache of memory before advancing the sequence. It then
+ * delays waiting for this sequence to expire until the memory is
+ * selected for reuse. In this way we increment the sequence value
+ * only once per n=cache-size frees, and the waits are done long
+ * after the sequence has expired, so they need only be verified to
+ * account for pathological conditions and to advance the read
+ * sequence. Tying the sequence number to the bucket size has the
+ * nice property that as the zone gets busier the buckets get larger
+ * and the sequence writes become fewer. If the coherency cost of
+ * advancing the write sequence number becomes too high we can advance
+ * it once for every N buckets in exchange for higher free-to-use
+ * latency and consequently higher memory consumption.
+ *
+ * If the read overhead of accessing the global cacheline becomes
+ * especially burdensome an invariant TSC could be used in place of the
+ * sequence. The algorithm would then only need to maintain the minimum
+ * observed tsc. This would trade potential cache synchronization
+ * overhead for local serialization and cpu timestamp overhead.
+ */
+
+/*
+ * A simplified diagram:
+ *
+ *        0                                        UINT_MAX
+ *        | -------------------- sequence number space -------------------- |
+ *              ^ rd seq                            ^ wr seq
+ *              | ----- valid sequence numbers ---- |
+ *            ^cpuA  ^cpuC
+ *        | -- free -- | --------- deferred frees -------- | ---- free ---- |
+ *
+ *
+ * In this example cpuA has the lowest sequence number and poll can
+ * advance rd seq up to it. cpuB (not shown) is idle and is
+ * considered to observe wr seq.
+ *
+ * Freed memory that is tagged with a sequence number between rd seq and
+ * wr seq cannot be safely reclaimed because cpuA may hold a reference to
+ * it. Any other memory is guaranteed to be unreferenced.
+ *
+ * Any writer is free to advance wr seq at any time.
+ */
+
+static uma_zone_t smr_g_zone;
+static uma_zone_t smr_zone;
+
+#ifndef INVARIANTS
+#define SMR_SEQ_INIT 1 /* All valid sequence numbers are odd. */
+#define SMR_SEQ_INCR 2
+/*
+ * The maximum distance between rd_seq and wr_seq. The modular
+ * arithmetic would tolerate UINT_MAX / 2, but the delta is checked
+ * after the increment, so UINT_MAX / 4 leaves headroom for that many
+ * threads to advance the sequence before we could actually overflow.
+ */
+#define SMR_SEQ_MAX_DELTA (UINT_MAX / 4)
+#else
+/* We want to test the wrapping feature in invariants kernels. */
+#define SMR_SEQ_INCR (UINT_MAX / 10000)
+#define SMR_SEQ_INIT (UINT_MAX - 100000)
+/* Force extra polls to test the integer overflow detection. */
+#define SMR_SEQ_MAX_DELTA (1000)
+#endif
+
+
+/*
+ * Advance the write sequence and return the new value for use as the
+ * wait goal. This guarantees that any changes made by the calling
+ * thread prior to this call will be visible to all threads after
+ * rd_seq meets or exceeds the return value.
+ */
+smr_seq_t
+smr_advance(smr_t smr)
+{
+ smr_g_t g;
+ smr_seq_t goal;
+
+ /*
+ * Modifications not done in an smr section need to be visible
+ * before advancing the sequence.
+ */
+ atomic_thread_fence_rel();
+
+ /*
+ * Increment the global write sequence by SMR_SEQ_INCR (normally
+ * 2). Since it is normally initialized to 1, the only valid
+ * values are odd, and an observed value of 0 in a particular CPU
+ * means it is not currently in a read section.
+ */
+ g = smr->c_global;
+ goal = atomic_fetchadd_int(&g->g_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;
+
+ /*
+ * Force a synchronization here if the goal is getting too
+ * far ahead of the read sequence number. This keeps the
+ * wrap detecting arithmetic working in pathological cases.
+ */
+ if (goal - atomic_load_int(&g->g_rd_seq) >= SMR_SEQ_MAX_DELTA)
+ smr_wait(smr, goal);
+
+ return (goal);
+}
+
+/*
+ * Poll to determine whether all readers have observed the 'goal' write
+ * sequence number.
+ *
+ * If wait is true this will spin until the goal is met.
+ *
+ * This routine will update the minimum observed read sequence number in
+ * g_rd_seq if it does a scan. It may not do a scan if another call has
+ * already advanced g_rd_seq beyond the caller's goal.
+ *
+ * Returns true if the goal is met and false if not.
+ */
+bool
+smr_poll(smr_t smr, smr_seq_t goal, bool wait)
+{
+ smr_g_t g;
+ smr_t c;
+ smr_seq_t g_wr_seq, g_rd_seq, rd_seq, c_seq;
+ int i;
+ bool success;
+
+ /*
+ * Use a critical section so that we can avoid ABA races
+ * caused by long preemption sleeps.
+ */
+ success = true;
+ critical_enter();
+ g = smr->c_global;
+
+ /*
+ * The acquire barrier loads g_wr_seq after g_rd_seq so that we cannot
+ * observe an updated read sequence that is larger than the write.
+ */
+ g_rd_seq = atomic_load_acq_int(&g->g_rd_seq);
+ g_wr_seq = atomic_load_int(&g->g_wr_seq);
+
+ /*
+ * Detect whether the goal has already been observed.
+ *
+ * The goal must be in the range of g_wr_seq >= goal >= g_rd_seq for
+ * it to be valid. If it is not then the caller held on to it and
+ * the integer wrapped. If we wrapped back within range the caller
+ * will harmlessly scan.
+ */
+ if (SMR_SEQ_GEQ(g_rd_seq, goal) || SMR_SEQ_LT(g_wr_seq, goal))
+ goto out;
+
+ /*
+ * Loop until all cores have observed the goal sequence or have
+ * gone inactive. Keep track of the oldest sequence currently
+ * active as rd_seq.
+ */
+ success = true;
+ rd_seq = g_wr_seq;
+ CPU_FOREACH(i) {
+ c = zpcpu_get_cpu(smr, i);
+ c_seq = 0;
+ for (;;) {
+ c_seq = atomic_load_int(&c->c_seq);
+ if (c_seq == SMR_SEQ_INVALID)
+ break;
+
+ /*
+ * There is a race described in smr.h:smr_enter that
+ * can lead to a stale seq value but not stale data
+ * access. If we find a value out of range here we
+ * pin it to the current min to prevent it from
+ * advancing until that stale section has expired.
+ *
+ * The race is created when a cpu loads the g_wr_seq
+ * value in a local register and then another thread
+ * advances g_wr_seq and calls smr_poll(), which will
+ * observe no value yet in c_seq and advance g_rd_seq
+ * up to g_wr_seq, which is beyond the register-cached
+ * value. This is only likely to happen on a
+ * hypervisor or with a system management interrupt.
+ */
+ if (SMR_SEQ_LT(c_seq, g_rd_seq))
+ c_seq = g_rd_seq;
+
+ /*
+ * If the sequence number meets the goal we are
+ * done with this cpu.
+ */
+ if (SMR_SEQ_GEQ(c_seq, goal))
+ break;
+
+ /*
+ * If we're not waiting we will still scan the rest
+ * of the cpus and update g_rd_seq before returning
+ * an error.
+ */
+ if (!wait) {
+ success = false;
+ break;
+ }
+ cpu_spinwait();
+ }
+
+ /*
+ * Limit the minimum observed rd_seq whether we met the goal
+ * or not.
+ */
+ if (c_seq != SMR_SEQ_INVALID && SMR_SEQ_GT(rd_seq, c_seq))
+ rd_seq = c_seq;
+ }
+
+ /*
+ * Advance the rd_seq as long as we observed the most recent one.
+ */
+ g_rd_seq = atomic_load_int(&g->g_rd_seq);
+ do {
+ if (SMR_SEQ_LEQ(rd_seq, g_rd_seq))
+ break;
+ } while (atomic_fcmpset_int(&g->g_rd_seq, &g_rd_seq, rd_seq) == 0);
+
+out:
+ critical_exit();
+
+ return (success);
+}
+
+smr_t
+smr_create(const char *name)
+{
+ smr_t smr, c;
+ smr_g_t g;
+ int i;
+
+ g = uma_zalloc(smr_g_zone, M_WAITOK);
+ smr = uma_zalloc(smr_zone, M_WAITOK);
+
+ g->g_name = name;
+ g->g_rd_seq = g->g_wr_seq = SMR_SEQ_INIT;
+
+ /* Initialize all CPUs, not just those running. */
+ for (i = 0; i <= mp_maxid; i++) {
+ c = zpcpu_get_cpu(smr, i);
+ c->c_seq = SMR_SEQ_INVALID;
+ c->c_global = g;
+ }
+ atomic_thread_fence_seq_cst();
+
+ return (smr);
+}
+
+void
+smr_destroy(smr_t smr)
+{
+
+ smr_synchronize(smr);
+ uma_zfree(smr_g_zone, smr->c_global);
+ uma_zfree(smr_zone, smr);
+}
+
+/*
+ * Initialize the UMA slab zone.
+ */
+void
+smr_init(void)
+{
+
+ smr_g_zone = uma_zcreate("SMR GLOBAL", sizeof(struct smr_g),
+ NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
+ smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
+ NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
+}
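
A writer-side sketch, not part of this diff: it shows the pattern the
header comment describes, stamping retired memory with a goal from
smr_advance() and reclaiming it lazily with a non-waiting smr_poll().
The deferred queue, node type, and the use of M_TEMP are illustrative
placeholders; only smr_advance(), smr_poll(), smr_seq_t, and smr_t come
from the code above, and the queue is assumed to be serialized by the
caller.

/* Assumes <sys/queue.h>, <sys/malloc.h> and <sys/smr.h>. */
struct deferred {
        STAILQ_ENTRY(deferred) link;
        smr_seq_t goal;                 /* Wait goal from smr_advance(). */
        void *data;                     /* Memory already unlinked. */
};
static STAILQ_HEAD(, deferred) dq = STAILQ_HEAD_INITIALIZER(dq);

static void
deferred_retire(smr_t smr, struct deferred *d)
{

        /* New readers must no longer be able to find d->data. */
        d->goal = smr_advance(smr);
        STAILQ_INSERT_TAIL(&dq, d, link);
}

static void
deferred_reclaim(smr_t smr)
{
        struct deferred *d;

        /* Goals are queued in order; stop at the first unexpired one. */
        while ((d = STAILQ_FIRST(&dq)) != NULL &&
            smr_poll(smr, d->goal, false)) {
                STAILQ_REMOVE_HEAD(&dq, link);
                free(d->data, M_TEMP); /* M_TEMP stands in for a real type. */
                free(d, M_TEMP);
        }
}
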
Index: sys/sys/smr.h
===================================================================
--- /dev/null
+++ sys/sys/smr.h
@@ -0,0 +1,187 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _SYS_SMR_H_
+#define _SYS_SMR_H_
+
+/*
+ * Safe memory reclamation. See subr_smr.c for a description of the
+ * algorithm.
+ *
+ * Readers synchronize with smr_enter()/smr_exit() and writers may either
+ * free directly to an SMR UMA zone or use smr_synchronize() or smr_wait().
+ */
+
+/* The sequence number is used to record the order and visibility of writes. */
+typedef uint32_t smr_seq_t;
+
+/*
+ * Modular arithmetic for comparing epoch numbers that have
+ * potentially wrapped. Copied from tcp_seq.h.
+ */
+#define SMR_SEQ_LT(a, b) ((int)((a)-(b)) < 0)
+#define SMR_SEQ_LEQ(a, b) ((int)((a)-(b)) <= 0)
+#define SMR_SEQ_GT(a, b) ((int)((a)-(b)) > 0)
+#define SMR_SEQ_GEQ(a, b) ((int)((a)-(b)) >= 0)
+
+#define SMR_SEQ_INVALID 0
+
+/* Global SMR state. */
+struct smr_g {
+ const char *g_name; /* Name for debugging/reporting. */
+ smr_seq_t g_wr_seq; /* Current global write sequence #. */
+ smr_seq_t g_rd_seq; /* Minimum observed read sequence. */
+};
+typedef struct smr_g *smr_g_t;
+
+/* Per-cpu SMR state. */
+struct smr {
+ smr_seq_t c_seq; /* Current observed sequence. */
+ smr_g_t c_global; /* Global SMR state. */
+};
+typedef struct smr *smr_t;
+
+/*
+ * Enter a read section.
+ */
+static inline void
+smr_enter(smr_t smr)
+{
+ smr_g_t g;
+ smr_seq_t wr_seq;
+
+ critical_enter();
+ smr = zpcpu_get(smr);
+ g = smr->c_global;
+ KASSERT(smr->c_seq == 0,
+ ("smr_enter(%s) does not support recursion.", g->g_name));
+
+ /*
+ * Store the current observed write sequence number in our
+ * per-cpu state so that it can be queried via smr_poll().
+ * Frees that are newer than this stored value will be
+ * deferred until we call smr_exit().
+ *
+ * An acquire barrier is used to synchronize with smr_exit()
+ * and smr_poll().
+ *
+ * It is possible that a long delay between loading the wr_seq
+ * and storing the c_seq could create a situation where the
+ * rd_seq advances beyond our stored c_seq. In this situation
+ * only the observed wr_seq is stale, the fence still orders
+ * the load. See smr_poll() for details on how this condition
+ * is detected and handled there.
+ */
+ wr_seq = atomic_load_int(&g->g_wr_seq);
+ /* This is an add because we do not have atomic_store_acq_int */
+ atomic_add_acq_int(&smr->c_seq, wr_seq);
+}
+
+/*
+ * Exit a read section.
+ */
+static inline void
+smr_exit(smr_t smr)
+{
+ smr_g_t g;
+
+ smr = zpcpu_get(smr);
+ g = smr->c_global;
+ CRITICAL_ASSERT(curthread);
+ KASSERT(smr->c_seq != 0,
+ ("smr_exit(%s) not in a smr section.", g->g_name));
+
+ /*
+ * Clear the recorded sequence number. This allows poll() to
+ * detect CPUs not in read sections.
+ *
+ * Use release semantics to retire any stores before the sequence
+ * number is cleared.
+ */
+ atomic_store_rel_int(&smr->c_seq, SMR_SEQ_INVALID);
+ critical_exit();
+}
+
+/*
+ * Advances the write sequence number. Returns the sequence number
+ * required to ensure that all modifications are visible to readers.
+ */
+smr_seq_t smr_advance(smr_t smr);
+
+/*
+ * Returns true if a goal sequence has been reached. If
+ * wait is true this will busy loop until success.
+ */
+bool smr_poll(smr_t smr, smr_seq_t goal, bool wait);
+
+/* Create a new SMR context. */
+smr_t smr_create(const char *name);
+void smr_destroy(smr_t smr);
+
+/*
+ * Blocking wait for all readers to observe 'goal'.
+ */
+static inline bool
+smr_wait(smr_t smr, smr_seq_t goal)
+{
+
+ return (smr_poll(smr, goal, true));
+}
+
+/*
+ * Synchronize advances the write sequence and returns when all
+ * readers have observed it.
+ *
+ * If your application can cache a sequence number returned from
+ * smr_advance() and poll or wait at a later time there will
+ * be less chance of busy looping while waiting for readers.
+ */
+static inline void
+smr_synchronize(smr_t smr)
+{
+
+ smr_wait(smr, smr_advance(smr));
+}
+
+/*
+ * Return the current write sequence number.
+ */
+static inline smr_seq_t
+smr_current(smr_t smr)
+{
+ return (smr->c_global->g_wr_seq);
+}
+
+
+/* Only at startup. */
+void smr_init(void);
+
+#endif /* _SYS_SMR_H_ */
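
A reader-side sketch, not part of this diff: a reader's only
obligations are to load the shared pointer atomically and to bracket
the dereference with smr_enter()/smr_exit(), mirroring the pattern in
smrstress.c below. 'struct foo', 'foo_current', and 'foo_smr' are
hypothetical.

struct foo {
        int value;
};
static uintptr_t foo_current;  /* Replaced by writers, freed via SMR. */
static smr_t foo_smr;          /* From smr_create() or uma_zone_get_smr(). */

static int
foo_read(void)
{
        struct foo *f;
        int v;

        smr_enter(foo_smr);
        f = (void *)atomic_load_ptr(&foo_current);
        v = (f != NULL) ? f->value : -1; /* 'f' cannot be reused here. */
        smr_exit(foo_smr);

        return (v);
}

The comparison macros copied from tcp_seq.h make this wrap-safe: for
example SMR_SEQ_GT(1, UINT_MAX - 1) is true because the signed
difference (int)(1 - (UINT_MAX - 1)) evaluates to 3, so a sequence
that has wrapped past zero still compares as newer.
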
Index: sys/tools/smrstress/Makefile
===================================================================
--- /dev/null
+++ sys/tools/smrstress/Makefile
@@ -0,0 +1,4 @@
+KMOD= smrstress
+SRCS= smrstress.c
+
+.include <bsd.kmod.mk>
Index: sys/tools/smrstress/smrstress.c
===================================================================
--- /dev/null
+++ sys/tools/smrstress/smrstress.c
@@ -0,0 +1,193 @@
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/libkern.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/kthread.h>
+#include <sys/conf.h>
+#include <sys/mbuf.h>
+#include <sys/smp.h>
+#include <sys/smr.h>
+
+#include <vm/uma.h>
+
+#include <machine/stdarg.h>
+
+static uma_zone_t smrs_zone;
+static smr_t smrs_smr;
+
+static int smrs_cpus = 32;
+static int smrs_started;
+static int smrs_iterations = 100000;
+static int smrs_failures = 0;
+static volatile int smrs_completed;
+
+struct smrs {
+ int generation;
+ volatile u_int count;
+};
+
+uintptr_t smrs_current;
+
+static void
+smrs_error(struct smrs *smrs, const char *fmt, ...)
+{
+ va_list ap;
+
+ atomic_add_int(&smrs_failures, 1);
+ printf("SMR ERROR: wr_seq %d, rd_seq %d, c_seq %d, generation %d, count %d ",
+ smrs_smr->c_global->g_wr_seq, smrs_smr->c_global->g_rd_seq,
+ zpcpu_get(smrs_smr)->c_seq, smrs->generation, smrs->count);
+ va_start(ap, fmt);
+ (void)vprintf(fmt, ap);
+ va_end(ap);
+}
+
+static void
+smrs_read(void)
+{
+ struct smrs *cur;
+ int cnt;
+
+ /* Wait for the writer to exit. */
+ while (smrs_completed == 0) {
+ smr_enter(smrs_smr);
+ cur = (void *)atomic_load_ptr(&smrs_current);
+ if (cur->generation == -1)
+ smrs_error(cur, "read early: Use after free!\n");
+ atomic_add_int(&cur->count, 1);
+ DELAY(100);
+ cnt = atomic_fetchadd_int(&cur->count, -1);
+ if (cur->generation == -1)
+ smrs_error(cur, "read late: Use after free!\n");
+ else if (cnt <= 0)
+ smrs_error(cur, "Invalid ref\n");
+ smr_exit(smrs_smr);
+ maybe_yield();
+ }
+}
+
+static void
+smrs_write(void)
+{
+ struct smrs *cur;
+ int i;
+
+ for (i = 0; i < smrs_iterations; i++) {
+ cur = (void *)smrs_current;
+ atomic_store_rel_ptr(&smrs_current,
+ (uintptr_t)uma_zalloc(smrs_zone, M_WAITOK));
+ uma_zfree(smrs_zone, cur);
+ }
+}
+
+static void
+smrs_thread(void *arg)
+{
+ int rw = (intptr_t)arg;
+
+ if (rw == 0)
+ smrs_write();
+ else
+ smrs_read();
+ atomic_add_int(&smrs_completed, 1);
+}
+
+static void
+smrs_start(void)
+{
+ struct smrs *cur;
+ int i;
+
+ smrs_started = smrs_cpus;
+ smrs_completed = 0;
+ atomic_store_rel_ptr(&smrs_current,
+ (uintptr_t)uma_zalloc(smrs_zone, M_WAITOK));
+ for (i = 0; i < smrs_started; i++)
+ kthread_add((void (*)(void *))smrs_thread,
+ (void *)(intptr_t)i, curproc, NULL, 0, 0, "smrs-%d", i);
+
+ while (smrs_completed != smrs_started)
+ pause("prf", hz/2);
+
+ cur = (void *)smrs_current;
+ smrs_current = (uintptr_t)NULL;
+ uma_zfree(smrs_zone, cur);
+
+ printf("Completed %d loops with %d failures\n",
+ smrs_iterations, smrs_failures);
+}
+
+static int
+smrs_ctor(void *mem, int size, void *arg, int flags)
+{
+ static int smr_generation = 0;
+ struct smrs *smrs = mem;
+
+ if (smrs->generation != -1 && smrs->generation != 0)
+ smrs_error(smrs, "ctor: Invalid smr generation on ctor\n");
+ else if (smrs->count != 0)
+ smrs_error(smrs, "ctor: Invalid count\n");
+ smrs->generation = ++smr_generation;
+
+ return (0);
+}
+
+
+static void
+smrs_dtor(void *mem, int size, void *arg)
+{
+ struct smrs *smrs = mem;
+
+ if (smrs->generation == -1)
+ smrs_error(smrs, "dtor: Invalid generation");
+ smrs->generation = -1;
+ if (smrs->count != 0)
+ smrs_error(smrs, "dtor: Invalid count\n");
+}
+
+
+static void
+smrs_init(void)
+{
+
+ smrs_zone = uma_zcreate("smrs", sizeof(struct smrs),
+ smrs_ctor, smrs_dtor, NULL, NULL, UMA_ALIGN_PTR,
+ UMA_ZONE_SMR | UMA_ZONE_ZINIT);
+ smrs_smr = uma_zone_get_smr(smrs_zone);
+}
+
+static void
+smrs_fini(void)
+{
+
+ uma_zdestroy(smrs_zone);
+}
+
+static int
+smrs_modevent(module_t mod, int what, void *arg)
+{
+
+ switch (what) {
+ case MOD_LOAD:
+ smrs_init();
+ smrs_start();
+ break;
+ case MOD_UNLOAD:
+ smrs_fini();
+ break;
+ default:
+ break;
+ }
+ return (0);
+}
+
+moduledata_t smrs_meta = {
+ "smrstress",
+ smrs_modevent,
+ NULL
+};
+DECLARE_MODULE(smrstress, smrs_meta, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+MODULE_VERSION(smrstress, 1);
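
A usage note, assuming the standard kernel-module workflow: loading
smrstress.ko runs the whole test from MOD_LOAD, since smrs_start()
creates one writer kthread and 31 reader kthreads (smrs_cpus is fixed
at 32 regardless of how many CPUs are present) and blocks in the
pause() loop until all of them complete. A clean run ends with
"Completed 100000 loops with 0 failures" on the console; any
use-after-free or reference-count anomaly the readers detect is
reported through smrs_error() and counted in smrs_failures.
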
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -259,16 +259,27 @@
* mini-dumps.
*/
#define UMA_ZONE_PCPU 0x8000 /*
- * Allocates mp_maxid + 1 slabs of PAGE_SIZE
+ * Allocates mp_maxid + 1 slabs of
+ * PAGE_SIZE
*/
#define UMA_ZONE_FIRSTTOUCH 0x10000 /* First touch NUMA policy */
#define UMA_ZONE_ROUNDROBIN 0x20000 /* Round-robin NUMA policy. */
+#define UMA_ZONE_SMR 0x40000 /*
+ * Safe memory reclamation defers
+ * frees until all read sections
+ * have exited. This flag creates
+ * a unique SMR context for this
+ * zone. To share contexts see
+ * uma_zone_set_smr() below.
+ *
+ * See sys/smr.h for more details.
+ */
/* In use by UMA_ZFLAGs: 0xffe00000 */
/*
- * These flags are shared between the keg and zone. In zones wishing to add
- * new kegs these flags must be compatible. Some are determined based on
- * physical parameters of the request and may not be provided by the consumer.
+ * These flags are shared between the keg and zone. Some are determined
+ * based on physical parameters of the request and may not be provided by
+ * the consumer.
*/
#define UMA_ZONE_INHERIT \
(UMA_ZONE_NOTOUCH | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE | \
@@ -600,6 +611,18 @@
void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
+/*
+ * Associate a zone with an SMR context created elsewhere so that
+ * multiple zones may share the same context.
+ */
+struct smr;
+void uma_zone_set_smr(uma_zone_t zone, struct smr *);
+
+/*
+ * Fetch the SMR context that was set or created by uma_zcreate().
+ */
+struct smr *uma_zone_get_smr(uma_zone_t zone);
+
/*
* These flags are setable in the allocf and visible in the freef.
*/
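
To make the new flag and accessors concrete, a hypothetical consumer
(not part of this diff): one zone receives a private context via
UMA_ZONE_SMR, and a second zone is attached to the same context with
uma_zone_set_smr(), which must happen before the zone is used since it
asserts the zone is still cold. 'struct obj', 'struct hdr', and the
zone names are made up.

static uma_zone_t obj_zone, hdr_zone;

static void
obj_zones_init(void)
{

        /* UMA_ZONE_SMR creates a private SMR context for this zone. */
        obj_zone = uma_zcreate("obj", sizeof(struct obj),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);

        /* The second zone shares obj_zone's context. */
        hdr_zone = uma_zcreate("hdr", sizeof(struct hdr),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_smr(hdr_zone, uma_zone_get_smr(obj_zone));
}
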
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -77,6 +77,7 @@
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
+#include <sys/smr.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>
@@ -273,6 +274,8 @@
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
+static inline void item_dtor(uma_zone_t zone, void *item, int size,
+ void *udata, enum zfreeskip skip);
static int zero_init(void *, int, int);
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
@@ -454,9 +457,9 @@
uma_bucket_t bucket;
/*
- * Don't allocate buckets in low memory situations.
+ * Don't allocate buckets early in boot.
*/
- if (bucketdisable)
+ if (__predict_false(booted < BOOT_KVA))
return (NULL);
/*
@@ -488,6 +491,9 @@
#endif
bucket->ub_cnt = 0;
bucket->ub_entries = ubz->ubz_entries;
+ bucket->ub_seq = 0;
+ CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
+ zone->uz_name, zone, bucket);
}
return (bucket);
@@ -500,6 +506,8 @@
KASSERT(bucket->ub_cnt == 0,
("bucket_free: Freeing a non free bucket."));
+ KASSERT(bucket->ub_seq == 0,
+ ("bucket_free: Freeing an SMR bucket."));
if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
udata = (void *)(uintptr_t)zone->uz_flags;
ubz = bucket_zone_lookup(bucket->ub_entries);
@@ -517,23 +525,38 @@
/*
* Attempt to satisfy an allocation by retrieving a full bucket from one of the
- * zone's caches.
+ * zone's caches. If a bucket is found the zone is not locked on return.
*/
static uma_bucket_t
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
{
uma_bucket_t bucket;
+ int i;
+ bool dtor = false;
ZONE_LOCK_ASSERT(zone);
- if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
- MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
- TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
- zdom->uzd_nitems -= bucket->ub_cnt;
- if (zdom->uzd_imin > zdom->uzd_nitems)
- zdom->uzd_imin = zdom->uzd_nitems;
- zone->uz_bkt_count -= bucket->ub_cnt;
+ if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
+ return (NULL);
+
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_seq) {
+ if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
+ return (NULL);
+ bucket->ub_seq = 0;
+ dtor = true;
}
+ MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
+ TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
+ zdom->uzd_nitems -= bucket->ub_cnt;
+ if (zdom->uzd_imin > zdom->uzd_nitems)
+ zdom->uzd_imin = zdom->uzd_nitems;
+ zone->uz_bkt_count -= bucket->ub_cnt;
+ ZONE_UNLOCK(zone);
+ if (dtor)
+ for (i = 0; i < bucket->ub_cnt; i++)
+ item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
+ NULL, SKIP_NONE);
+
return (bucket);
}
@@ -551,10 +574,7 @@
KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
("%s: zone %p overflow", __func__, zone));
- if (ws)
- TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
- else
- TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
+ TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
zdom->uzd_nitems += bucket->ub_cnt;
if (ws && zdom->uzd_imax < zdom->uzd_nitems)
zdom->uzd_imax = zdom->uzd_nitems;
@@ -941,12 +961,22 @@
if (bucket == NULL || bucket->ub_cnt == 0)
return;
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && bucket->ub_seq != 0) {
+ smr_wait(zone->uz_smr, bucket->ub_seq);
+ for (i = 0; i < bucket->ub_cnt; i++)
+ item_dtor(zone, bucket->ub_bucket[i],
+ zone->uz_size, NULL, SKIP_NONE);
+ bucket->ub_seq = 0;
+ }
if (zone->uz_fini)
for (i = 0; i < bucket->ub_cnt; i++)
zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
if (zone->uz_max_items > 0)
zone_free_limit(zone, bucket->ub_cnt);
+#ifdef INVARIANTS
+ bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
+#endif
bucket->ub_cnt = 0;
}
@@ -1035,12 +1065,21 @@
zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
b1 = NULL;
}
+
+ /*
+ * Don't flush SMR zone buckets. This leaves the zone without a
+ * bucket and forces every free to synchronize().
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
+ goto out;
b2 = cache_bucket_unload_free(cache);
if (b2 != NULL && b2->ub_cnt != 0) {
zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
b2 = NULL;
}
b3 = cache_bucket_unload_cross(cache);
+
+out:
critical_exit();
ZONE_UNLOCK(zone);
if (b1)
@@ -2294,7 +2333,7 @@
zone->uz_bucket_size = 0;
zone->uz_bucket_size_min = 0;
zone->uz_bucket_size_max = BUCKET_MAX;
- zone->uz_flags = 0;
+ zone->uz_flags = (arg->flags & UMA_ZONE_SMR);
zone->uz_warning = NULL;
/* The domain structures follow the cpu structures. */
zone->uz_domain =
@@ -2375,7 +2414,7 @@
karg.uminit = arg->uminit;
karg.fini = arg->fini;
karg.align = arg->align;
- karg.flags = arg->flags;
+ karg.flags = (arg->flags & ~UMA_ZONE_SMR);
karg.zone = zone;
error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
flags);
@@ -2399,6 +2438,10 @@
zone->uz_fails = EARLY_COUNTER;
}
+ /* Caller requests a private SMR context. */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
+ zone->uz_smr = smr_create(zone->uz_name);
+
KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
(UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
("Invalid zone flag combination"));
@@ -2600,6 +2643,7 @@
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
bucket_init();
+ smr_init();
}
#ifndef UMA_MD_SMALL_ALLOC
@@ -3014,9 +3058,14 @@
/*
* If we have run out of items in our alloc bucket see
* if we can switch with the free bucket.
+ *
+ * SMR zones can't re-use the free bucket until the sequence has
+ * expired.
*/
- if (cache->uc_freebucket.ucb_cnt != 0) {
- cache_bucket_swap(&cache->uc_freebucket, &cache->uc_allocbucket);
+ if ((zone->uz_flags & UMA_ZONE_SMR) == 0 &&
+ cache->uc_freebucket.ucb_cnt != 0) {
+ cache_bucket_swap(&cache->uc_freebucket,
+ &cache->uc_allocbucket);
return (true);
}
@@ -3070,7 +3119,6 @@
}
if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
- ZONE_UNLOCK(zone);
KASSERT(bucket->ub_cnt != 0,
("uma_zalloc_arg: Returning an empty bucket."));
cache_bucket_load_alloc(cache, bucket);
@@ -3665,8 +3713,8 @@
*/
cache = &zone->uz_cpu[curcpu];
uz_flags = cache_uz_flags(cache);
- if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0 ||
- UMA_ALWAYS_CTORDTOR))
+ if (__predict_false(((uz_flags & UMA_ZFLAG_CTORDTOR) != 0 ||
+ UMA_ALWAYS_CTORDTOR) && (uz_flags & UMA_ZONE_SMR) == 0))
item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
/*
@@ -3697,6 +3745,13 @@
critical_enter();
do {
cache = &zone->uz_cpu[curcpu];
+ /*
+ * Try to free into the allocbucket first to give LIFO
+ * ordering for cache-hot datastructures. Spill over
+ * into the freebucket if necessary. Alloc will swap
+ * them if one runs dry.
+ */
+ bucket = &cache->uc_allocbucket;
#ifdef NUMA
domain = PCPU_GET(domain);
if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
@@ -3704,18 +3759,13 @@
bucket = &cache->uc_crossbucket;
} else
#endif
- {
- /*
- * Try to free into the allocbucket first to give LIFO
- * ordering for cache-hot datastructures. Spill over
- * into the freebucket if necessary. Alloc will swap
- * them if one runs dry.
- */
- bucket = &cache->uc_allocbucket;
- if (__predict_false(bucket->ucb_cnt >=
- bucket->ucb_entries))
- bucket = &cache->uc_freebucket;
- }
+ /*
+ * If this is an SMR zone or the allocbucket is full we use
+ * the free bucket.
+ */
+ if (__predict_false((uz_flags & UMA_ZONE_SMR) != 0 ||
+ bucket->ucb_cnt >= bucket->ucb_entries))
+ bucket = &cache->uc_freebucket;
if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
cache_bucket_push(cache, bucket, item);
critical_exit();
@@ -3728,7 +3778,8 @@
* If nothing else caught this, we'll just do an internal free.
*/
zfree_item:
- zone_free_item(zone, item, udata, SKIP_DTOR);
+ zone_free_item(zone, item, udata,
+ (uz_flags & UMA_ZONE_SMR) == 0 ? SKIP_DTOR : SKIP_NONE);
}
#ifdef NUMA
@@ -3778,6 +3829,8 @@
if (!TAILQ_EMPTY(&fullbuckets)) {
ZONE_LOCK(zone);
while ((b = TAILQ_FIRST(&fullbuckets)) != NULL) {
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
+ b->ub_seq = smr_current(zone->uz_smr);
TAILQ_REMOVE(&fullbuckets, b, ub_link);
if (zone->uz_bkt_count >= zone->uz_bkt_max) {
ZONE_UNLOCK(zone);
@@ -3796,6 +3849,7 @@
}
if (bucket->ub_cnt != 0)
bucket_drain(zone, bucket);
+ bucket->ub_seq = 0;
bucket_free(zone, bucket, udata);
}
#endif
@@ -3862,15 +3916,16 @@
int itemdomain)
{
uma_cache_bucket_t cbucket;
- uma_bucket_t bucket;
+ uma_bucket_t newbucket, bucket;
int domain;
CRITICAL_ASSERT(curthread);
- if (zone->uz_bucket_size == 0 || bucketdisable)
+ if (zone->uz_bucket_size == 0)
return false;
cache = &zone->uz_cpu[curcpu];
+ newbucket = NULL;
/*
* FIRSTTOUCH domains need to free to the correct zdom. When
@@ -3895,14 +3950,29 @@
/* We are no longer associated with this CPU. */
critical_exit();
+ /*
+ * Don't let SMR zones operate without a free bucket. Force
+ * a synchronize and re-use this one. If bucket allocation
+ * fails we only degrade to one synchronize per bucket_size
+ * items rather than one per item.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
+ if (bucket != NULL)
+ bucket->ub_seq = smr_advance(zone->uz_smr);
+ newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+ if (newbucket == NULL && bucket != NULL) {
+ bucket_drain(zone, bucket);
+ newbucket = bucket;
+ bucket = NULL;
+ }
+ } else if (!bucketdisable)
+ newbucket = bucket_alloc(zone, udata, M_NOWAIT);
+
if (bucket != NULL)
zone_free_bucket(zone, bucket, udata, domain, itemdomain);
- bucket = bucket_alloc(zone, udata, M_NOWAIT);
- CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
- zone->uz_name, zone, bucket);
critical_enter();
- if (bucket == NULL)
+ if ((bucket = newbucket) == NULL)
return (false);
cache = &zone->uz_cpu[curcpu];
#ifdef NUMA
@@ -4031,6 +4101,15 @@
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{
+ /*
+ * If a free is sent directly to an SMR zone we have to
+ * synchronize immediately because the item can instantly
+ * be reallocated. This should only happen in degenerate
+ * cases when no memory is available for per-cpu caches.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_DTOR)
+ smr_synchronize(zone->uz_smr);
+
item_dtor(zone, item, zone->uz_size, udata, skip);
if (skip < SKIP_FINI && zone->uz_fini)
@@ -4255,6 +4334,25 @@
keg->uk_allocf = allocf;
}
+/* See uma.h */
+void
+uma_zone_set_smr(uma_zone_t zone, smr_t smr)
+{
+
+ ZONE_ASSERT_COLD(zone);
+
+ zone->uz_flags |= UMA_ZONE_SMR;
+ zone->uz_smr = smr;
+ zone_update_caches(zone);
+}
+
+smr_t
+uma_zone_get_smr(uma_zone_t zone)
+{
+
+ return (zone->uz_smr);
+}
+
/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
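
Summarizing the integration above: the per-cpu free path never blocks
(it stamps a full free bucket with smr_advance() and queues it),
zone_fetch_bucket() polls without waiting and declines any bucket
whose ub_seq has not expired, and only the degenerate paths wait
(draining a stamped bucket uses smr_wait(); a direct zone_free_item()
uses smr_synchronize()). A condensed writer sketch against such a
zone, reusing the hypothetical obj_zone from the uma.h example above
and the single-writer publish pattern from smrstress.c:

static uintptr_t obj_current;   /* Hypothetical published pointer. */

static void
obj_replace(void)
{
        struct obj *old, *new;

        new = uma_zalloc(obj_zone, M_WAITOK);
        old = (void *)obj_current;              /* Single writer. */
        atomic_store_rel_ptr(&obj_current, (uintptr_t)new);
        /* The zone defers reuse of 'old' until readers have moved on. */
        uma_zfree(obj_zone, old);
}
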
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -245,9 +245,10 @@
*/
struct uma_bucket {
TAILQ_ENTRY(uma_bucket) ub_link; /* Link into the zone */
- int16_t ub_cnt; /* Count of items in bucket. */
- int16_t ub_entries; /* Max items. */
- void *ub_bucket[]; /* actual allocation storage */
+ int16_t ub_cnt; /* Count of items in bucket. */
+ int16_t ub_entries; /* Max items. */
+ uint32_t ub_seq; /* SMR sequence number. */
+ void *ub_bucket[]; /* actual allocation storage */
};
typedef struct uma_bucket * uma_bucket_t;
@@ -484,7 +485,7 @@
uint32_t uz_size; /* Size inherited from kegs */
uma_ctor uz_ctor; /* Constructor for each allocation */
uma_dtor uz_dtor; /* Destructor */
- uint64_t uz_spare0;
+ struct smr *uz_smr; /* Safe memory reclaim context. */
uint64_t uz_max_items; /* Maximum number of items to alloc */
uint32_t uz_sleepers; /* Threads sleeping on limit */
uint16_t uz_bucket_size; /* Number of items in full bucket */