Index: head/sys/conf/NOTES
===================================================================
--- head/sys/conf/NOTES
+++ head/sys/conf/NOTES
@@ -2772,6 +2772,8 @@
 options 	MAXFILES=999
 
 # Random number generator
+# Alternative RNG algorithm.
+#options 	RANDOM_FENESTRASX
 # Allow the CSPRNG algorithm to be loaded as a module.
 #options 	RANDOM_LOADABLE
 # Select this to allow high-rate but potentially expensive
Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files
+++ head/sys/conf/files
@@ -722,7 +722,7 @@
 	compile-with "${ZSTD_C} ${ZSTD_DECOMPRESS_BLOCK_FLAGS}"
 contrib/zstd/lib/decompress/huf_decompress.c	optional zstdio compile-with ${ZSTD_C}
 # Blake 2
-contrib/libb2/blake2b-ref.c	optional crypto | ipsec | ipsec_support \
+contrib/libb2/blake2b-ref.c	optional crypto | ipsec | ipsec_support | !random_loadable random_fenestrasx \
 	compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
 contrib/libb2/blake2s-ref.c	optional crypto | ipsec | ipsec_support \
 	compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
@@ -2820,7 +2820,14 @@
 dev/random/random_infra.c	standard
 dev/random/random_harvestq.c	standard
 dev/random/randomdev.c		optional !random_loadable
-dev/random/fortuna.c		optional !random_loadable
+dev/random/fenestrasX/fx_brng.c	optional !random_loadable random_fenestrasx
+dev/random/fenestrasX/fx_main.c	optional !random_loadable random_fenestrasx \
+	compile-with "${NORMAL_C} -I$S/crypto/blake2"
+dev/random/fenestrasX/fx_pool.c	optional !random_loadable random_fenestrasx \
+	compile-with "${NORMAL_C} -I$S/crypto/blake2"
+dev/random/fenestrasX/fx_rng.c	optional !random_loadable random_fenestrasx \
+	compile-with "${NORMAL_C} -I$S/crypto/blake2"
+dev/random/fortuna.c		optional !random_loadable !random_fenestrasx
 dev/random/hash.c		optional !random_loadable
 dev/rccgpio/rccgpio.c		optional rccgpio gpio
 dev/re/if_re.c			optional re
Index: head/sys/conf/options
===================================================================
--- head/sys/conf/options
+++ head/sys/conf/options
@@ -966,6 +966,8 @@
 RCTL		opt_global.h
 
 # Random number generator(s)
+# Alternative RNG algorithm.
+RANDOM_FENESTRASX	opt_global.h
 # With this, no entropy processor is loaded, but the entropy
 # harvesting infrastructure is present. This means an entropy
 # processor may be loaded as a module.
Index: head/sys/dev/random/fenestrasX/fx_brng.h
===================================================================
--- head/sys/dev/random/fenestrasX/fx_brng.h
+++ head/sys/dev/random/fenestrasX/fx_brng.h
@@ -0,0 +1,66 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#pragma once
+
+#include <dev/random/fenestrasX/fx_rng.h>
+
+#define	FXRNG_BUFRNG_SZ	128
+
+/*
+ * An object representing a buffered random number generator with forward
+ * secrecy (aka "fast-key-erasure").
+ *
+ * There is a single static root instance of this object associated with the
+ * entropy harvester, as well as additional instances per CPU, lazily allocated
+ * in NUMA-local memory, seeded from output of the root generator.
+ */
+struct fxrng_buffered_rng {
+	struct fxrng_basic_rng	brng_rng;
+#define	FXRNG_BRNG_LOCK(brng)	mtx_lock(&(brng)->brng_rng.rng_lk)
+#define	FXRNG_BRNG_UNLOCK(brng)	mtx_unlock(&(brng)->brng_rng.rng_lk)
+#define	FXRNG_BRNG_ASSERT(brng)	mtx_assert(&(brng)->brng_rng.rng_lk, MA_OWNED)
+#define	FXRNG_BRNG_ASSERT_NOT(brng) \
+	mtx_assert(&(brng)->brng_rng.rng_lk, MA_NOTOWNED)
+
+	/* Entropy reseed generation ("seed version"). */
+	uint64_t	brng_generation;
+
+	/* Buffered output for quick access by small requests. */
+	uint8_t		brng_buffer[FXRNG_BUFRNG_SZ];
+	uint8_t		brng_avail_idx;
+};
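+
+/*
+ * A minimal sketch of the buffer bookkeeping, assuming the 128-byte
+ * FXRNG_BUFRNG_SZ above: brng_avail_idx == sizeof(brng_buffer) means the
+ * buffer is empty.  With brng_avail_idx == 96, a 16-byte read is served from
+ * brng_buffer[96..112) (and that span is zeroed); a subsequent 64-byte read
+ * drains the remaining 16 bytes, regenerates the buffer, and serves the other
+ * 48 bytes from the fresh contents; requests too large to fit in the buffer
+ * bypass it entirely.  See fxrng_brng_getbytes_internal() in fx_brng.c.
+ */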
+
+void fxrng_brng_init(struct fxrng_buffered_rng *);
+void fxrng_brng_produce_seed_data_internal(struct fxrng_buffered_rng *, void *,
+    size_t, uint64_t *seed_generation);
+void fxrng_brng_read(struct fxrng_buffered_rng *, void *, size_t);
+
+void fxrng_brng_reseed(const void *, size_t);
+struct harvest_event;
+void fxrng_brng_src_reseed(const struct harvest_event *);
Index: head/sys/dev/random/fenestrasX/fx_brng.c
===================================================================
--- head/sys/dev/random/fenestrasX/fx_brng.c
+++ head/sys/dev/random/fenestrasX/fx_brng.c
@@ -0,0 +1,295 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fail.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/sdt.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+
+#include <dev/random/randomdev.h>
+#include <dev/random/random_harvestq.h>
+#include <dev/random/uint128.h>
+
+#include <dev/random/fenestrasX/fx_brng.h>
+#include <dev/random/fenestrasX/fx_priv.h>
+#include <dev/random/fenestrasX/fx_rng.h>
+
+/*
+ * Implementation of a buffered RNG, described in § 1.2-1.4 of the whitepaper.
+ */
+
+/*
+ * Initialize a buffered rng instance (either the static root instance, or a
+ * per-CPU instance on the heap).  Both should be zero-initialized before this
+ * routine is called.
+ */
+void
+fxrng_brng_init(struct fxrng_buffered_rng *rng)
+{
+	fxrng_rng_init(&rng->brng_rng, rng == &fxrng_root);
+
+	/* I.e., the buffer is empty. */
+	rng->brng_avail_idx = sizeof(rng->brng_buffer);
+
+	/*
+	 * It is fine and correct for brng_generation and brng_buffer to be
+	 * zero values.
+	 *
+	 * brng_prf and brng_generation must be initialized later.
+	 * Initialization is special for the root BRNG.  PCPU child instances
+	 * use fxrng_brng_produce_seed_data_internal() below.
+	 */
+}
+
+/*
+ * Directly reseed the root BRNG from a first-time entropy source,
+ * incorporating the existing BRNG state.  The main motivation for doing so "is
+ * to ensure that as soon as an entropy source produces data, PRNG output
+ * depends on the data from that source." (§ 3.1)
+ *
+ * The root BRNG is locked on entry and initial keying (brng_generation > 0)
+ * has already been performed.  The root BRNG is unlocked on return.
+ */
+void
+fxrng_brng_src_reseed(const struct harvest_event *event)
+{
+	struct fxrng_buffered_rng *rng;
+
+	rng = &fxrng_root;
+	FXRNG_BRNG_ASSERT(rng);
+	ASSERT_DEBUG(rng->brng_generation > 0, "root RNG not seeded");
+
+	fxrng_rng_src_reseed(&rng->brng_rng, event);
+	FXRNG_BRNG_ASSERT(rng);
+
+	/*
+	 * Bump root generation (which is costly) to force downstream BRNGs to
+	 * reseed and quickly incorporate the new entropy.  The intuition is
+	 * that this tradeoff is worth it because new sources show up extremely
+	 * rarely (limiting cost) and if they can contribute any entropy to a
+	 * weak state, we want to propagate it to all generators ASAP.
+	 */
+	rng->brng_generation++;
+	atomic_store_rel_64(&fxrng_root_generation, rng->brng_generation);
+	FXRNG_BRNG_UNLOCK(rng);
+}
+
+/*
+ * Reseed a brng from some amount of pooled entropy (determined in fx_pool.c by
+ * fxent_timer_reseed_npools).  For initial seeding, we pool entropy in a
+ * single pool and use this API as well (fxrng_alg_seeded).
+ */
+void
+fxrng_brng_reseed(const void *entr, size_t sz)
+{
+	struct fxrng_buffered_rng *rng;
+
+	rng = &fxrng_root;
+	FXRNG_BRNG_LOCK(rng);
+
+	fxrng_rng_reseed(&rng->brng_rng, (rng->brng_generation > 0), entr, sz);
+	FXRNG_BRNG_ASSERT(rng);
+
+	rng->brng_generation++;
+	atomic_store_rel_64(&fxrng_root_generation, rng->brng_generation);
+	FXRNG_BRNG_UNLOCK(rng);
+}
+
+/*
+ * Grab some bytes off an initialized, current generation RNG.
+ *
+ * (Does not handle reseeding if our generation is stale.)
+ *
+ * Locking protocol is a bit odd.  The RNG is locked on entrance, but the lock
+ * is dropped on exit.  This avoids holding a lock during expensive and slow
+ * RNG generation.
+ */
+static void
+fxrng_brng_getbytes_internal(struct fxrng_buffered_rng *rng, void *buf,
+    size_t nbytes)
+{
+
+	FXRNG_BRNG_ASSERT(rng);
+
+	/* Make the zero request impossible for the rest of the logic. */
+	if (__predict_false(nbytes == 0)) {
+		FXRNG_BRNG_UNLOCK(rng);
+		goto out;
+	}
+
+	/* Fast/easy case: Use some bytes from the buffer. */
+	if (rng->brng_avail_idx + nbytes <= sizeof(rng->brng_buffer)) {
+		memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], nbytes);
+		explicit_bzero(&rng->brng_buffer[rng->brng_avail_idx], nbytes);
+		rng->brng_avail_idx += nbytes;
+		FXRNG_BRNG_UNLOCK(rng);
+		goto out;
+	}
+
+	/* Buffer case: */
+	if (nbytes < sizeof(rng->brng_buffer)) {
+		size_t rem;
+
+		/* Drain anything left in the buffer first. */
+		if (rng->brng_avail_idx < sizeof(rng->brng_buffer)) {
+			rem = sizeof(rng->brng_buffer) - rng->brng_avail_idx;
+			ASSERT_DEBUG(nbytes > rem, "invariant");
+
+			memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], rem);
+
+			buf = (uint8_t*)buf + rem;
+			nbytes -= rem;
+			ASSERT_DEBUG(nbytes != 0, "invariant");
+		}
+
+		/*
+		 * Partial fill from first buffer, have to rekey and generate a
+		 * new buffer to do the rest.
+		 */
+		fxrng_rng_genrandom_internal(&rng->brng_rng, rng->brng_buffer,
+		    sizeof(rng->brng_buffer), false);
+		FXRNG_BRNG_ASSERT(rng);
+		rng->brng_avail_idx = 0;
+
+		memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], nbytes);
+		explicit_bzero(&rng->brng_buffer[rng->brng_avail_idx], nbytes);
+		rng->brng_avail_idx += nbytes;
+		FXRNG_BRNG_UNLOCK(rng);
+		goto out;
+	}
+
+	/* Large request; skip the buffer. */
+	fxrng_rng_genrandom_internal(&rng->brng_rng, buf, nbytes, true);
+
+out:
+	FXRNG_BRNG_ASSERT_NOT(rng);
+	return;
+}
+
+/*
+ * API to get a new key for a downstream RNG.  Returns the new key in 'buf', as
+ * well as the generator's reseed_generation.
+ *
+ * 'rng' is locked on entry and unlocked on return.
+ *
+ * Only valid after confirming that the caller's seed version or
+ * reseed_generation matches the root's (or we are the root).  (For now, this
+ * is only used to reseed the per-CPU generators from the root.)
+ */
+void
+fxrng_brng_produce_seed_data_internal(struct fxrng_buffered_rng *rng,
+    void *buf, size_t keysz, uint64_t *seed_generation)
+{
+	FXRNG_BRNG_ASSERT(rng);
+	ASSERT_DEBUG(keysz == FX_CHACHA20_KEYSIZE, "keysz: %zu", keysz);
+
+	*seed_generation = rng->brng_generation;
+	fxrng_brng_getbytes_internal(rng, buf, keysz);
+	FXRNG_BRNG_ASSERT_NOT(rng);
+}
+
+/*
+ * Read from an allocated and initialized buffered BRNG.  This is a high-level
+ * API, but it does not handle PCPU BRNG allocation.
+ *
+ * BRNG is locked on entry.  It is unlocked on return.
+ */
+void
+fxrng_brng_read(struct fxrng_buffered_rng *rng, void *buf, size_t nbytes)
+{
+	uint8_t newkey[FX_CHACHA20_KEYSIZE];
+
+	FXRNG_BRNG_ASSERT(rng);
+
+	/* Fast path: there hasn't been a global reseed since last read. */
+	if (rng->brng_generation == atomic_load_acq_64(&fxrng_root_generation))
+		goto done_reseeding;
+
+	ASSERT(rng != &fxrng_root, "root rng inconsistent seed version");
+
+	/*
+	 * Slow path: We need to rekey from the parent BRNG to incorporate new
+	 * entropy material.
+	 *
+	 * Lock order is always root -> percpu.
+	 */
+	FXRNG_BRNG_UNLOCK(rng);
+	FXRNG_BRNG_LOCK(&fxrng_root);
+	FXRNG_BRNG_LOCK(rng);
+
+	/*
+	 * If we lost the reseeding race when the lock was dropped, don't
+	 * duplicate work.
+	 */
+	if (__predict_false(rng->brng_generation ==
+	    atomic_load_acq_64(&fxrng_root_generation))) {
+		FXRNG_BRNG_UNLOCK(&fxrng_root);
+		goto done_reseeding;
+	}
+
+	fxrng_brng_produce_seed_data_internal(&fxrng_root, newkey,
+	    sizeof(newkey), &rng->brng_generation);
+
+	FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
+	FXRNG_BRNG_ASSERT(rng);
+
+	fxrng_rng_setkey(&rng->brng_rng, newkey, sizeof(newkey));
+	explicit_bzero(newkey, sizeof(newkey));
+
+	/*
+	 * A reseed invalidates any previous buffered contents.  Here, we
+	 * forward the available index to the end of the buffer, i.e., empty.
+	 * Requests that would use the buffer (< 128 bytes) will refill its
+	 * contents on demand.
+	 *
+	 * It is explicitly ok that we do not zero out any remaining buffer
+	 * bytes; they will never be handed out to callers, and they reveal
+	 * nothing about the reseeded key (which came from the root BRNG).
+	 * (§ 1.3)
+	 */
+	rng->brng_avail_idx = sizeof(rng->brng_buffer);
+
+done_reseeding:
+	if (rng != &fxrng_root)
+		FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
+	FXRNG_BRNG_ASSERT(rng);
+
+	fxrng_brng_getbytes_internal(rng, buf, nbytes);
+	FXRNG_BRNG_ASSERT_NOT(rng);
+}
Index: head/sys/dev/random/fenestrasX/fx_hash.h
===================================================================
--- head/sys/dev/random/fenestrasX/fx_hash.h
+++ head/sys/dev/random/fenestrasX/fx_hash.h
@@ -0,0 +1,72 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#pragma once
+
+#include <dev/random/fenestrasX/fx_priv.h>
+#define	blake2b_init	blake2b_init_ref
+#define	blake2b_update	blake2b_update_ref
+#define	blake2b_final	blake2b_final_ref
+#include <contrib/libb2/blake2.h>
+
+#define	FXRNG_HASH_SZ	BLAKE2B_OUTBYTES	/* 64 */
+
+/*
+ * Wrappers for hash function abstraction.
+ */
+struct fxrng_hash {
+	blake2b_state	state;
+};
+
+static inline void
+fxrng_hash_init(struct fxrng_hash *h)
+{
+	int rc;
+
+	rc = blake2b_init(&h->state, FXRNG_HASH_SZ);
+	ASSERT(rc == 0, "blake2b_init");
+}
+
+static inline void
+fxrng_hash_update(struct fxrng_hash *h, const void *buf, size_t sz)
+{
+	int rc;
+
+	rc = blake2b_update(&h->state, buf, sz);
+	ASSERT(rc == 0, "blake2b_update");
+}
+
+static inline void
+fxrng_hash_finish(struct fxrng_hash *h, uint8_t buf[static FXRNG_HASH_SZ], size_t sz)
+{
+	int rc;
+
+	rc = blake2b_final(&h->state, buf, sz);
+	ASSERT(rc == 0, "blake2b_final(sz=%zu)", sz);
+	explicit_bzero(h, sizeof(*h));
+}
Index: head/sys/dev/random/fenestrasX/fx_main.c
===================================================================
--- head/sys/dev/random/fenestrasX/fx_main.c
+++ head/sys/dev/random/fenestrasX/fx_main.c
@@ -0,0 +1,274 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This random algorithm is derived in part from the "Windows 10 random number
+ * generation infrastructure" whitepaper published by Niels Ferguson and
+ * Microsoft: https://aka.ms/win10rng
+ *
+ * It is also inspired by DJB's writing on buffered key-erasure PRNGs:
+ * https://blog.cr.yp.to/20170723-random.html
+ *
+ * The Windows 10 RNG bears some similarity to Fortuna, which Ferguson was also
+ * involved with.  Notable differences include:
+ *  - Extended to multi-CPU design
+ *  - Extended to pre-buffer some PRNG output
+ *  - Pool-based reseeding is solely time-based (rather than on-access w/
+ *    pacing)
+ *  - Extended to specify efficient userspace design
+ *  - Always-available design (requires the equivalent of loader(8) for all
+ *    boots; probably relatively easy given the limited platforms Windows 10
+ *    supports)
+ *
+ * Some aspects of the design document I found confusing and may have
+ * misinterpreted:
+ *  - Relationship between root PRNG seed version and periodic reseed pool use.
+ *    I interpreted these as separate sequences.  The root PRNG seed version is
+ *    bumped both by the periodic pool based reseed, and also special
+ *    conditions such as the first time an entropy source provides entropy.  I
+ *    don't think first-time entropy sources should cause us to skip an entropy
+ *    pool reseed.
+ *  - Initial seeding.  The paper is pretty terse on the subject.  My
+ *    interpretation of the document is that the Windows RNG infrastructure
+ *    relies on the loader(8)-provided material for initial seeding and either
+ *    ignores or doesn't start entropy sources until after that time.  So when
+ *    the paper says that first-time entropy source material "bypasses the
+ *    pools," the root PRNG state already has been keyed for the first time and
+ *    can generate 256 bits, mix it with the first-time entropy, and reseed
+ *    immediately.
+ *
+ * Some notable design choices in this implementation that diverge from those
+ * specified in the document above:
+ *  - Blake2b instead of SHA-2 512 for entropy pooling
+ *  - Chacha20 instead of AES-CTR DRBG for PRF
+ *  - Initial seeding.  We treat the 0->1 seed version (brng_generation) edge
+ *    as the transition from blocked to unblocked.  That edge is also the first
+ *    time the key of the root BRNG's PRF is set.  We perform initial seeding
+ *    when the first request for entropy arrives.
+ *    • As a result: Entropy callbacks prior to this edge do not have a keyed
+ *      root PRNG, so bypassing the pools is kind of meaningless.  Instead,
+ *      they feed into pool0.  (They also do not set the root PRNG key or bump
+ *      the root PRNG seed version.)
+ *    • Entropy callbacks after the edge behave like the specification.
+ *    • All one-off sources are fed into pool0 and the result used to seed the
+ *      root BRNG during the initial seed step.
+ *    • All memory needed for initial seeding must be preallocated or static or
+ *      fit on the stack; random reads can occur in nonsleepable contexts and
+ *      we cannot allocate M_WAITOK.  (We also cannot fail to incorporate any
+ *      present one-off source, to the extent it is in the control of
+ *      software.)
+ *  - Timer interval reseeding.  We also start the timer-based reseeding at
+ *    initial seed, but unlike the design, our initial seed is some time after
+ *    load (usually on the order of micro- or milliseconds due to stack_guard
+ *    on x86, but conceivably later if nothing reads from random for a while).
+ *
+ * Not yet implemented, not in scope, or todo:
+ *  - arc4random(9) injection/replacement
+ *  - Userspace portions -- shared page, like timehands vdso?
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/domainset.h>
+#include <sys/fail.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/sdt.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
+
+#include <dev/random/randomdev.h>
+#include <dev/random/random_harvestq.h>
+#include <dev/random/uint128.h>
+
+#include <dev/random/fenestrasX/fx_brng.h>
+#include <dev/random/fenestrasX/fx_hash.h>
+#include <dev/random/fenestrasX/fx_pool.h>
+#include <dev/random/fenestrasX/fx_priv.h>
+#include <dev/random/fenestrasX/fx_rng.h>
+
+struct fxrng_buffered_rng fxrng_root;
+uint64_t __read_mostly fxrng_root_generation;
+DPCPU_DEFINE_STATIC(struct fxrng_buffered_rng *, fxrng_brng);
+
+/*
+ * Top-level read API from randomdev.  Responsible for NOWAIT-allocating
+ * per-cpu NUMA-local BRNGs, if needed and satisfiable; subroutines handle
+ * reseeding if the local BRNG is stale and rekeying when necessary.  In
+ * low-memory conditions when a local BRNG cannot be allocated, the request is
+ * simply forwarded to the root BRNG.
+ *
+ * It is a precondition that the root BRNG's initial seeding has completed and
+ * the root generation number is nonzero.
+ */
+static void
+fxrng_alg_read(uint8_t *output, size_t nbytes)
+{
+	struct fxrng_buffered_rng **pcpu_brng_p, *rng, *tmp;
+	struct pcpu *pcpu;
+
+	pcpu = get_pcpu();
+
+	/*
+	 * The following statement directly accesses an implementation detail
+	 * of DPCPU, but the macros cater only to pinned threads; we want to
+	 * operate on our initial CPU, without pinning, *even if* we migrate.
+	 */
+	pcpu_brng_p = _DPCPU_PTR(pcpu->pc_dynamic, fxrng_brng);
+
+	rng = (void *)atomic_load_acq_ptr((uintptr_t *)pcpu_brng_p);
+
+	/*
+	 * Usually the pcpu BRNG has already been allocated, but we do it
+	 * on-demand and need to check first.  BRNGs are never deallocated and
+	 * are valid as soon as the pointer is initialized.
+	 */
+	if (__predict_false(rng == NULL)) {
+		uint8_t newkey[FX_CHACHA20_KEYSIZE];
+		struct domainset *ds;
+		int domain;
+
+		domain = pcpu->pc_domain;
+
+		/*
+		 * Allocate pcpu BRNGs off-domain on weird NUMA machines like
+		 * AMD Threadripper 2990WX, which has 2 NUMA nodes without
+		 * local memory controllers.  The PREF policy is automatically
+		 * converted to something appropriate when domains are empty.
+		 * (FIXED is not.)
+		 *
+		 * Otherwise, allocate strictly CPU-local memory.  The
+		 * rationale is this: if there is a memory shortage such that
+		 * PREF policy would fall back to RR, we have no business
+		 * wasting memory on a faster BRNG.  So, use a FIXED domainset
+		 * policy.  If we cannot allocate, that's fine!  We fall back
+		 * to invoking the root BRNG.
+		 */
+		if (VM_DOMAIN_EMPTY(domain))
+			ds = DOMAINSET_PREF(domain);
+		else
+			ds = DOMAINSET_FIXED(domain);
+
+		rng = malloc_domainset(sizeof(*rng), M_ENTROPY, ds,
+		    M_NOWAIT | M_ZERO);
+		if (rng == NULL) {
+			/* Relatively easy case: fall back to root BRNG. */
+			rng = &fxrng_root;
+			goto have_valid_rng;
+		}
+
+		fxrng_brng_init(rng);
+
+		/*
+		 * The root BRNG is always up and available.  Requests are
+		 * always satisfiable.  This is a design invariant.
+		 */
+		ASSERT_DEBUG(atomic_load_acq_64(&fxrng_root_generation) != 0,
+		    "%s: attempting to seed child BRNG when root hasn't "
+		    "been initialized yet.", __func__);
+
+		FXRNG_BRNG_LOCK(&fxrng_root);
+#ifdef WITNESS
+		/* Establish lock order root->pcpu for WITNESS. */
+		FXRNG_BRNG_LOCK(rng);
+		FXRNG_BRNG_UNLOCK(rng);
+#endif
+		fxrng_brng_produce_seed_data_internal(&fxrng_root, newkey,
+		    sizeof(newkey), &rng->brng_generation);
+		FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
+
+		fxrng_rng_setkey(&rng->brng_rng, newkey, sizeof(newkey));
+		explicit_bzero(newkey, sizeof(newkey));
+
+		/*
+		 * We have a valid RNG.  Try to install it, or grab the other
+		 * one if we lost the race.
+		 */
+		tmp = NULL;
+		while (tmp == NULL)
+			if (atomic_fcmpset_ptr((uintptr_t *)pcpu_brng_p,
+			    (uintptr_t *)&tmp, (uintptr_t)rng))
+				goto have_valid_rng;
+
+		/*
+		 * We lost the race.  There's nothing sensitive about
+		 * our BRNG's PRF state, because it will never be used
+		 * for anything and the key doesn't expose any
+		 * information about the parent (root) generator's
+		 * state -- it has already rekeyed.  The generation
+		 * number is public, and a zero counter isn't sensitive.
+		 */
+		free(rng, M_ENTROPY);
+		/*
+		 * Use the winner's PCPU BRNG.
+		 */
+		rng = tmp;
+	}
+
+have_valid_rng:
+	/* At this point we have a valid, initialized and seeded rng pointer. */
+	FXRNG_BRNG_LOCK(rng);
+	fxrng_brng_read(rng, output, nbytes);
+	FXRNG_BRNG_ASSERT_NOT(rng);
+}
+
+static void
+fxrng_init_alg(void *dummy __unused)
+{
+	DPCPU_ZERO(fxrng_brng);
+	fxrng_brng_init(&fxrng_root);
+	fxrng_pools_init();
+}
+SYSINIT(random_alg, SI_SUB_RANDOM, SI_ORDER_SECOND, fxrng_init_alg, NULL);
+
+/*
+ * Public visibility struct referenced directly by other parts of randomdev.
+ */
+const struct random_algorithm random_alg_context = {
+	.ra_ident = "fenestrasX",
+	.ra_pre_read = (void (*)(void))nullop,
+	.ra_read = fxrng_alg_read,
+	.ra_seeded = fxrng_alg_seeded,
+	.ra_event_processor = fxrng_event_processor,
+	.ra_poolcount = FXRNG_NPOOLS,
+};
Index: head/sys/dev/random/fenestrasX/fx_pool.h
===================================================================
--- head/sys/dev/random/fenestrasX/fx_pool.h
+++ head/sys/dev/random/fenestrasX/fx_pool.h
@@ -0,0 +1,36 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#pragma once
+
+#define	FXRNG_NPOOLS	8
+
+bool fxrng_alg_seeded(void);
+struct harvest_event;
+void fxrng_event_processor(struct harvest_event *);
+void fxrng_pools_init(void);
Index: head/sys/dev/random/fenestrasX/fx_pool.c
===================================================================
--- head/sys/dev/random/fenestrasX/fx_pool.c
+++ head/sys/dev/random/fenestrasX/fx_pool.c
@@ -0,0 +1,615 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/domainset.h>
+#include <sys/fail.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/random.h>
+#include <sys/sdt.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+
+#include <machine/atomic.h>
+#include <machine/smp.h>
+
+#include <dev/random/randomdev.h>
+#include <dev/random/random_harvestq.h>
+
+#include <dev/random/fenestrasX/fx_brng.h>
+#include <dev/random/fenestrasX/fx_hash.h>
+#include <dev/random/fenestrasX/fx_pool.h>
+#include <dev/random/fenestrasX/fx_priv.h>
+
+/*
+ * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
+ */
+#define	FXENT_RESEED_INTVL_GFACT	3
+#define	FXENT_RESEED_INTVL_MAX		3600
+
+/*
+ * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
+ * interval reaches INTVL_MAX, only pool 0 is used.
+ *
+ * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
+ * (if active) every 3^k timer reseeds.  (§ 3.3)
+ *
+ * (Entropy harvesting only round-robins across active pools.)
+ */
+#define	FXENT_RESEED_BASE		3
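+
+/*
+ * As a worked example of the schedule above (FXENT_RESEED_BASE == 3, after
+ * the interval has grown to FXENT_RESEED_INTVL_MAX): timer reseeds 1 and 2
+ * drain pool 0 only; reseed 3 drains pool 0 and activates pool 1; reseed 6
+ * drains pools 0-1; reseed 9 activates pool 2; reseed 18 drains pools 0-2;
+ * reseed 27 activates pool 3; and so on.  See fxent_timer_reseed() below.
+ */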
+
+/*
+ * Number of bytes from high quality sources to allocate to pool 0 before
+ * normal round-robin allocation after each timer reseed. (§ 3.4)
+ */
+#define	FXENT_HI_SRC_POOL0_BYTES	32
+
+/*
+ * § 3.1
+ *
+ * Low sources provide unconditioned entropy, such as mouse movements; high
+ * sources are assumed to provide high-quality random bytes.  Pull sources are
+ * those which can be polled, i.e., anything randomdev calls a "random_source."
+ *
+ * In the whitepaper, low sources are pull.  For us, at least in the existing
+ * design, low-quality sources push into some global ring buffer and then get
+ * forwarded into the RNG by a thread that continually polls.  Presumably their
+ * design batches low-entropy signals in some way (SHA512?) and only requests
+ * them dynamically on reseed.  I'm not sure what the benefit is vs feeding
+ * into the pools directly.
+ */
+enum fxrng_ent_access_cls {
+	FXRNG_PUSH,
+	FXRNG_PULL,
+};
+enum fxrng_ent_source_cls {
+	FXRNG_HI,
+	FXRNG_LO,
+	FXRNG_GARBAGE,
+};
+struct fxrng_ent_cls {
+	enum fxrng_ent_access_cls	entc_axx_cls;
+	enum fxrng_ent_source_cls	entc_src_cls;
+};
+
+static const struct fxrng_ent_cls fxrng_hi_pull = {
+	.entc_axx_cls = FXRNG_PULL,
+	.entc_src_cls = FXRNG_HI,
+};
+static const struct fxrng_ent_cls fxrng_hi_push = {
+	.entc_axx_cls = FXRNG_PUSH,
+	.entc_src_cls = FXRNG_HI,
+};
+static const struct fxrng_ent_cls fxrng_lo_push = {
+	.entc_axx_cls = FXRNG_PUSH,
+	.entc_src_cls = FXRNG_LO,
+};
+static const struct fxrng_ent_cls fxrng_garbage = {
+	.entc_axx_cls = FXRNG_PUSH,
+	.entc_src_cls = FXRNG_GARBAGE,
+};
+
+/*
+ * This table is a mapping of randomdev's current source abstractions to the
+ * designations above; at some point, if the design seems reasonable, it would
+ * make more sense to pull this up into the abstraction layer instead.
+ */
+static const struct fxrng_ent_char {
+	const struct fxrng_ent_cls	*entc_cls;
+} fxrng_ent_char[ENTROPYSOURCE] = {
+	[RANDOM_CACHED] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_ATTACH] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_KEYBOARD] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_MOUSE] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_NET_TUN] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_NET_ETHER] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_NET_NG] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_INTERRUPT] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_SWI] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_FS_ATIME] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_UMA] = {
+		.entc_cls = &fxrng_lo_push,
+	},
+	[RANDOM_PURE_OCTEON] = {
+		.entc_cls = &fxrng_hi_push,	/* Could be made pull. */
+	},
+	[RANDOM_PURE_SAFE] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_PURE_GLXSB] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_PURE_HIFN] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_PURE_RDRAND] = {
+		.entc_cls = &fxrng_hi_pull,
+	},
+	[RANDOM_PURE_NEHEMIAH] = {
+		.entc_cls = &fxrng_hi_pull,
+	},
+	[RANDOM_PURE_RNDTEST] = {
+		.entc_cls = &fxrng_garbage,
+	},
+	[RANDOM_PURE_VIRTIO] = {
+		.entc_cls = &fxrng_hi_pull,
+	},
+	[RANDOM_PURE_BROADCOM] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_PURE_CCP] = {
+		.entc_cls = &fxrng_hi_pull,
+	},
+	[RANDOM_PURE_DARN] = {
+		.entc_cls = &fxrng_hi_pull,
+	},
+	[RANDOM_PURE_TPM] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+	[RANDOM_PURE_VMGENID] = {
+		.entc_cls = &fxrng_hi_push,
+	},
+};
+
+/* Useful for single-bit-per-source state. */
+BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);
+
+/* XXX Borrowed from not-yet-committed D22702. */
+#ifndef BIT_TEST_SET_ATOMIC_ACQ
+#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
+	(atomic_testandset_acq_long(		\
+	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
+#endif
+#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
+	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)
+
+/* For special behavior on first-time entropy sources. (§ 3.1) */
+static struct fxrng_bits __read_mostly fxrng_seen;
+
+/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
+_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
+static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];
+
+/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
+static struct mtx fxent_pool_lk;
+MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
+#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
+#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
+#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
+#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
+static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
+static unsigned __read_mostly fxent_nactpools = 1;
+static struct timeout_task fxent_reseed_timer;
+static int __read_mostly fxent_timer_ready;
+
+/*
+ * Track number of bytes of entropy harvested from high-quality sources prior
+ * to initial keying.  The idea is to collect more jitter entropy when fewer
+ * high-quality bytes were available and less if we had other good sources.  We
+ * want to provide always-on availability but don't necessarily have *any*
+ * great sources on some platforms.
+ *
+ * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
+ * make more sense to pull this up into the abstraction layer instead.
+ *
+ * Jitter entropy is unimplemented for now.
+ */
+static unsigned long fxrng_preseed_ent;
+
+void
+fxrng_pools_init(void)
+{
+	size_t i;
+
+	for (i = 0; i < nitems(fxent_pool); i++)
+		fxrng_hash_init(&fxent_pool[i]);
+}
+
+static inline bool
+fxrng_hi_source(enum random_entropy_source src)
+{
+	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
+}
+
+/*
+ * A racy check that this high-entropy source's event should contribute to
+ * pool0 on the basis of per-source byte count.  The check is racy for two
+ * reasons:
+ *   - Performance: The vast majority of the time, we've already taken 32 bytes
+ *     from any present high quality source and the racy check lets us avoid
+ *     dirtying the cache for the global array.
+ *   - Correctness: It's fine that the check is racy.  The failure modes are:
+ *     • False positive: We will detect when we take the lock.
+ *     • False negative: We still collect the entropy; it just won't be
+ *       preferentially placed in pool0 in this case.
+ */
+static inline bool
+fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
+{
+	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
+	    FXENT_HI_SRC_POOL0_BYTES);
+}
+
+/*
+ * Top level entropy processing API from randomdev.
+ *
+ * Invoked by the core randomdev subsystem both for preload entropy, "push"
+ * sources (like interrupts, keyboard, etc) and pull sources (RDRAND, etc).
+ */
+void
+fxrng_event_processor(struct harvest_event *event)
+{
+	enum random_entropy_source src;
+	unsigned pool;
+	bool first_time, first_32;
+
+	src = event->he_source;
+
+	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
+	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
+	    (unsigned)event->he_size, sizeof(event->he_entropy));
+
+	/*
+	 * Zero bytes of source entropy doesn't count as observing this source
+	 * for the first time.  We still harvest the counter entropy.
+	 */
+	first_time = event->he_size > 0 &&
+	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
+	if (__predict_false(first_time)) {
+		/*
+		 * "The first time [any source] provides entropy, it is used to
+		 * directly reseed the root PRNG.  The entropy pools are
+		 * bypassed." (§ 3.1)
+		 *
+		 * Unlike Windows, we cannot rely on loader(8) seed material
+		 * being present, so we perform initial keying in the kernel.
+		 * We use brng_generation 0 to represent an unkeyed state.
+		 *
+		 * Prior to initial keying, it doesn't make sense to try to mix
+		 * the entropy directly with the root PRNG state, as the root
+		 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
+		 * entropy in pool0 and do not bump the root PRNG seed version
+		 * or set its key.  Initial keying will incorporate pool0 and
+		 * bump the brng_generation (seed version).
+		 *
+		 * After initial keying, we do directly mix in first-time
+		 * entropy sources.  We use the root BRNG to generate 32 bytes
+		 * and use fxrng_hash to mix it with the new entropy source and
+		 * re-key with the first 256 bits of hash output.
+		 */
+		FXENT_LOCK();
+		FXRNG_BRNG_LOCK(&fxrng_root);
+		if (__predict_true(fxrng_root.brng_generation > 0)) {
+			/* Bypass the pools: */
+			FXENT_UNLOCK();
+			fxrng_brng_src_reseed(event);
+			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
+			return;
+		}
+
+		/*
+		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
+		 * lock, so we only need to hold on to the pool lock to prevent
+		 * initial keying without this entropy.
+		 */
+		FXRNG_BRNG_UNLOCK(&fxrng_root);
+
+		/* Root PRNG hasn't been keyed yet, just accumulate event. */
+		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
+		    sizeof(event->he_somecounter));
+		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
+		    event->he_size);
+
+		if (fxrng_hi_source(src)) {
+			/* Prevent overflow. */
+			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
+				fxrng_preseed_ent += event->he_size;
+		}
+		FXENT_UNLOCK();
+		return;
+	}
+	/* !first_time */
+
+	/*
+	 * "The first 32 bytes produced by a high entropy source after a reseed
+	 * from the pools is always put in pool 0." (§ 3.4)
+	 *
+	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
+	 * fxent_timer_reseed_npools() below.
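+	 *
+	 * E.g., a 40-byte event from a high-entropy source arriving right
+	 * after a timer reseed contributes its last 32 bytes to pool 0 and
+	 * round-robins the remaining 8 bytes together with the event's
+	 * timecounter.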
+	 */
+	first_32 = event->he_size > 0 &&
+	    fxrng_hi_source(src) &&
+	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
+	    fxrng_hi_pool0_eligible_racy(src);
+	if (__predict_false(first_32)) {
+		unsigned rem, seen;
+
+		FXENT_LOCK();
+		seen = fxrng_reseed_seen[src];
+		if (seen == FXENT_HI_SRC_POOL0_BYTES)
+			goto round_robin;
+
+		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
+		rem = MIN(rem, event->he_size);
+
+		fxrng_reseed_seen[src] = seen + rem;
+
+		/*
+		 * We put 'rem' bytes in pool0, and any remaining bytes are
+		 * round-robin'd across other pools.
+		 */
+		fxrng_hash_update(&fxent_pool[0],
+		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
+		    rem);
+		if (rem == event->he_size) {
+			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
+			    sizeof(event->he_somecounter));
+			FXENT_UNLOCK();
+			return;
+		}
+
+		/*
+		 * If fewer bytes were needed than this event provided, we only
+		 * take the last rem bytes of the entropy buffer and leave the
+		 * timecounter to be round-robin'd with the remaining entropy.
+		 */
+		event->he_size -= rem;
+		goto round_robin;
+	}
+	/* !first_32 */
+
+	FXENT_LOCK();
+
+round_robin:
+	FXENT_ASSERT();
+	pool = event->he_destination % fxent_nactpools;
+	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
+	    event->he_size);
+	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
+	    sizeof(event->he_somecounter));
+
+	if (__predict_false(fxrng_hi_source(src) &&
+	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
+		/* Prevent overflow. */
+		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
+			fxrng_preseed_ent += event->he_size;
+	}
+	FXENT_UNLOCK();
+}
+
+/*
+ * Top level "seeded" API/signal from randomdev.
+ *
+ * This is our warning that a request is coming: we need to be seeded.  In
+ * fenestrasX, a request for random bytes _never_ fails.  "We (ed: ditto) have
+ * observed that there are many callers that never check for the error code,
+ * even if they are generating cryptographic key material." (§ 1.6)
+ *
+ * If we returned 'false', both read_random(9) and chacha20_randomstir()
+ * (arc4random(9)) would blindly charge on with something almost certainly worse
+ * than what we've got, or are able to get quickly enough.
+ */
+bool
+fxrng_alg_seeded(void)
+{
+	uint8_t hash[FXRNG_HASH_SZ];
+	sbintime_t sbt;
+
+	/* The vast majority of the time, we expect to already be seeded. */
+	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
+		return (true);
+
+	/*
+	 * Take the lock and recheck; only one thread needs to do the initial
+	 * seeding work.
+	 */
+	FXENT_LOCK();
+	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
+		FXENT_UNLOCK();
+		return (true);
+	}
+	/* XXX Any one-off initial seeding goes here. */
+
+	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
+	fxrng_hash_init(&fxent_pool[0]);
+
+	fxrng_brng_reseed(hash, sizeof(hash));
+	FXENT_UNLOCK();
+
+	randomdev_unblock();
+	explicit_bzero(hash, sizeof(hash));
+
+	/*
+	 * This may be called too early for taskqueue_thread to be initialized.
+	 * fxent_pool_timer_init will detect if we've already unblocked and
+	 * queue the first timer reseed at that point.
+	 */
+	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
+		sbt = SBT_1S;
+		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
+		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
+	}
+	return (true);
+}
+
+/*
+ * Timer-based reseeds and pool expansion.
+ */
+static void
+fxent_timer_reseed_npools(unsigned n)
+{
+	/*
+	 * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
+	 * only used in a static context.  On the other hand, this is in
+	 * taskqueue TASK context and we're likely nearly at top of stack
+	 * already.
+	 */
+	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
+	unsigned i;
+
+	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);
+
+	FXENT_ASSERT();
+	/*
+	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
+	 * and then feeding them into fxrng_brng_reseed, which will hash the
+	 * aggregate together with the current root PRNG keystate to produce a
+	 * new key.  It will also bump the global generation counter
+	 * appropriately.
+	 */
+	for (i = 0; i < n; i++) {
+		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
+		    FXRNG_HASH_SZ);
+		fxrng_hash_init(&fxent_pool[i]);
+	}
+
+	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
+	explicit_bzero(hash, n * FXRNG_HASH_SZ);
+
+	/*
+	 * "The first 32 bytes produced by a high entropy source after a reseed
+	 * from the pools is always put in pool 0." (§ 3.4)
+	 *
+	 * So here we reset the tracking (somewhat naively given the majority
+	 * of sources on most machines are not what we consider "high", but at
+	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
+	 * prioritized into pool0.
+	 *
+	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
+	 */
+	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
+	FXENT_ASSERT();
+}
+
+static void
+fxent_timer_reseed(void *ctx __unused, int pending __unused)
+{
+	static unsigned reseed_intvl_sec = 1;
+	/* Counts only reseeds after FXENT_RESEED_INTVL_MAX is reached. */
+	static uint64_t reseed_number = 1;
+
+	unsigned next_ival, i, k;
+	sbintime_t sbt;
+
+	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
+		next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
+		if (next_ival > FXENT_RESEED_INTVL_MAX)
+			next_ival = FXENT_RESEED_INTVL_MAX;
+		FXENT_LOCK();
+		fxent_timer_reseed_npools(1);
+		FXENT_UNLOCK();
+	} else {
+		/*
+		 * The creation of entropy pools beyond 0 is enabled when the
+		 * reseed interval hits the maximum. (§ 3.3)
+		 */
+		next_ival = reseed_intvl_sec;
+
+		/*
+		 * Pool 0 is used on every reseed; pools 0..1 every 3rd reseed;
+		 * and in general, pools 0..n every 3^n reseeds.
+		 */
+		k = reseed_number;
+		reseed_number++;
+
+		/* Count how many pools, from [0, i), to use for reseed. */
+		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
+			if ((k % FXENT_RESEED_BASE) != 0)
+				break;
+			k /= FXENT_RESEED_BASE;
+		}
+
+		/*
+		 * If we haven't activated pool i yet, activate it and only
+		 * reseed from [0, i-1).  (§ 3.3)
+		 */
+		FXENT_LOCK();
+		if (i == fxent_nactpools + 1) {
+			fxent_timer_reseed_npools(fxent_nactpools);
+			fxent_nactpools++;
+		} else {
+			/* Just reseed from [0, i). */
+			fxent_timer_reseed_npools(i);
+		}
+		FXENT_UNLOCK();
+	}
+
+	/* Schedule the next reseed. */
+	sbt = next_ival * SBT_1S;
+	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
+	    -sbt, (sbt / 3), C_PREL(2));
+
+	reseed_intvl_sec = next_ival;
+}
+
+static void
+fxent_pool_timer_init(void *dummy __unused)
+{
+	sbintime_t sbt;
+
+	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
+	    fxent_timer_reseed, NULL);
+
+	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
+		sbt = SBT_1S;
+		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
+		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
+	}
+	atomic_store_rel_int(&fxent_timer_ready, 1);
+}
+/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
+SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
+    fxent_pool_timer_init, NULL);
Index: head/sys/dev/random/fenestrasX/fx_priv.h
===================================================================
--- head/sys/dev/random/fenestrasX/fx_priv.h
+++ head/sys/dev/random/fenestrasX/fx_priv.h
@@ -0,0 +1,49 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#pragma once
+
+#define	ASSERT(x, fmt, ...) do {					\
+	if (__predict_true(x))					\
+		break;							\
+	panic("%s:%d: Assertion '" #x "' in function %s failed: " fmt, 	\
+	    __FILE__, __LINE__, __func__, ## __VA_ARGS__);		\
+} while (false)
+#ifdef INVARIANTS
+#define	ASSERT_DEBUG(x, fmt, ...) do {					\
+	if (__predict_true(x))					\
+		break;							\
+	panic("%s:%d: Assertion '" #x "' in function %s failed: " fmt, 	\
+	    __FILE__, __LINE__, __func__, ## __VA_ARGS__);		\
+} while (false)
+#else
+#define	ASSERT_DEBUG(...)
+#endif
+
+extern struct fxrng_buffered_rng fxrng_root;
+extern uint64_t __read_mostly fxrng_root_generation;
Index: head/sys/dev/random/fenestrasX/fx_rng.h
===================================================================
--- head/sys/dev/random/fenestrasX/fx_rng.h
+++ head/sys/dev/random/fenestrasX/fx_rng.h
@@ -0,0 +1,72 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#pragma once
+
+#include <crypto/chacha20/chacha.h>
+#include <dev/random/fenestrasX/fx_priv.h>
+
+#define	FX_CHACHA20_KEYSIZE	32
+
+struct fxrng_basic_rng {
+	struct mtx		rng_lk;
+#define	FXRNG_RNG_LOCK(rng)	mtx_lock(&(rng)->rng_lk)
+#define	FXRNG_RNG_UNLOCK(rng)	mtx_unlock(&(rng)->rng_lk)
+#define	FXRNG_RNG_ASSERT(rng)	mtx_assert(&(rng)->rng_lk, MA_OWNED)
+#define	FXRNG_RNG_ASSERT_NOT(rng)	mtx_assert(&(rng)->rng_lk, MA_NOTOWNED)
+	/* CTR-mode cipher state, including 128-bit embedded counter. */
+	struct chacha_ctx	rng_prf;
+};
+
+/*
+ * Initialize a basic rng instance (half of a buffered FX BRNG).
+ * Memory should be zero-initialized before this routine is called.
+ */
+static inline void
+fxrng_rng_init(struct fxrng_basic_rng *rng, bool is_root_rng)
+{
+	if (is_root_rng)
+		mtx_init(&rng->rng_lk, "fx root brng lk", NULL, MTX_DEF);
+	else
+		mtx_init(&rng->rng_lk, "fx pcpu brng lk", NULL, MTX_DEF);
+}
+
+static inline void
+fxrng_rng_setkey(struct fxrng_basic_rng *rng, const void *newkey, size_t len)
+{
+	ASSERT_DEBUG(len == FX_CHACHA20_KEYSIZE, "chacha20 uses 256 bit keys");
+	chacha_keysetup(&rng->rng_prf, newkey, len * 8);
+}
+
+void fxrng_rng_genrandom_internal(struct fxrng_basic_rng *, void *, size_t,
+    bool return_unlocked);
+
+void fxrng_rng_reseed(struct fxrng_basic_rng *, bool seeded, const void *,
+    size_t);
+void fxrng_rng_src_reseed(struct fxrng_basic_rng *,
+    const struct harvest_event *);
Index: head/sys/dev/random/fenestrasX/fx_rng.c
===================================================================
--- head/sys/dev/random/fenestrasX/fx_rng.c
+++ head/sys/dev/random/fenestrasX/fx_rng.c
@@ -0,0 +1,249 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fail.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/sdt.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/stdarg.h>
+
+#define CHACHA_EMBED
+#define KEYSTREAM_ONLY
+#define CHACHA_NONCE0_CTR128
+#include <crypto/chacha20/chacha.h>
+#include <crypto/rijndael/rijndael-api-fst.h>
+#include <crypto/sha2/sha256.h>
+
+#include <dev/random/hash.h>
+#include <dev/random/randomdev.h>
+#include <dev/random/random_harvestq.h>
+#include <dev/random/uint128.h>
+
+#include <dev/random/fenestrasX/fx_hash.h>
+#include <dev/random/fenestrasX/fx_priv.h>
+#include <dev/random/fenestrasX/fx_rng.h>
+
+_Static_assert(FX_CHACHA20_KEYSIZE == RANDOM_KEYSIZE, "");
+
+#include <crypto/chacha20/chacha.c>
+
+static void
+fxrng_rng_keystream_internal(struct chacha_ctx *prf, void *buf, size_t nbytes)
+{
+	size_t chunklen;
+
+	while (nbytes > 0) {
+		chunklen = MIN(nbytes,
+		    rounddown((size_t)UINT32_MAX, CHACHA_BLOCKLEN));
+
+		chacha_encrypt_bytes(prf, NULL, buf, chunklen);
+		buf = (uint8_t *)buf + chunklen;
+		nbytes -= chunklen;
+	}
+}
+
+/*
+ * This subroutine pulls the counter out of Chacha, which for whatever reason
+ * always encodes and decodes counters in little-endian format, and adds
+ * 'addend' to it, saving the result in Chacha.
+ */
+static void
+fxrng_chacha_nonce_add64(struct chacha_ctx *ctx, uint64_t addend)
+{
+	uint128_t ctr;	/* Native-endian. */
+#if BYTE_ORDER == BIG_ENDIAN
+	uint128_t lectr;
+
+	chacha_ctrsave(ctx, (void *)&lectr);
+	ctr = le128dec(&lectr);
+#else
+	chacha_ctrsave(ctx, (void *)&ctr);
+#endif
+
+	uint128_add64(&ctr, addend);
+
+	/* chacha_ivsetup() does not modify the key, and we rely on that. */
+#if BYTE_ORDER == BIG_ENDIAN
+	le128enc(&lectr, ctr);
+	chacha_ivsetup(ctx, NULL, (const void *)&lectr);
+	explicit_bzero(&lectr, sizeof(lectr));
+#else
+	chacha_ivsetup(ctx, NULL, (const void *)&ctr);
+#endif
+	explicit_bzero(&ctr, sizeof(ctr));
+}
+
+/*
+ * Generate from the unbuffered source PRNG.
+ *
+ * Handles fast key erasure (rekeys the PRF with a generated key under lock).
+ *
+ * RNG lock is required on entry.  If return_unlocked is true, RNG lock will
+ * be dropped on return.
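+ *
+ * For example, a 100-byte request with return_unlocked set first generates a
+ * fresh 256-bit key from the current state, copies the PRF state, advances
+ * the original's embedded counter by howmany(100, CHACHA_BLOCKLEN) == 2
+ * blocks, re-keys the original with the fresh key, and only then drops the
+ * lock and streams the caller's bytes from the copy.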
+ */
+void
+fxrng_rng_genrandom_internal(struct fxrng_basic_rng *rng, void *buf,
+    size_t nbytes, bool return_unlocked)
+{
+	struct chacha_ctx ctx_copy, *p_ctx;
+	uint8_t newkey[FX_CHACHA20_KEYSIZE];
+	size_t blockcount;
+
+	FXRNG_RNG_ASSERT(rng);
+
+	/* Save off the initial output of the generator for rekeying. */
+	fxrng_rng_keystream_internal(&rng->rng_prf, newkey, sizeof(newkey));
+
+	if (return_unlocked) {
+		memcpy(&ctx_copy, &rng->rng_prf, sizeof(ctx_copy));
+		p_ctx = &ctx_copy;
+
+		/*
+		 * Forward the Chacha counter state over the blocks we promise
+		 * to generate for the caller without the lock.
+		 */
+		blockcount = howmany(nbytes, CHACHA_BLOCKLEN);
+		fxrng_chacha_nonce_add64(&rng->rng_prf, blockcount);
+
+		/* Re-key before dropping the lock. */
+		chacha_keysetup(&rng->rng_prf, newkey, sizeof(newkey) * 8);
+		explicit_bzero(newkey, sizeof(newkey));
+
+		FXRNG_RNG_UNLOCK(rng);
+	} else {
+		p_ctx = &rng->rng_prf;
+	}
+
+	fxrng_rng_keystream_internal(p_ctx, buf, nbytes);
+
+	if (return_unlocked) {
+		explicit_bzero(&ctx_copy, sizeof(ctx_copy));
+		FXRNG_RNG_ASSERT_NOT(rng);
+	} else {
+		/* Re-key before exit. */
+		chacha_keysetup(&rng->rng_prf, newkey, sizeof(newkey) * 8);
+		explicit_bzero(newkey, sizeof(newkey));
+		FXRNG_RNG_ASSERT(rng);
+	}
+}
+
+/*
+ * Helper to reseed the root RNG, incorporating the existing RNG state.
+ *
+ * The root RNG is locked on entry and locked on return.
+ */
+static void
+fxrng_rng_reseed_internal(struct fxrng_basic_rng *rng, bool seeded,
+    const void *src, size_t sz, ...)
+{
+	union {
+		uint8_t root_state[FX_CHACHA20_KEYSIZE];
+		uint8_t hash_out[FXRNG_HASH_SZ];
+	} u;
+	struct fxrng_hash mix;
+	va_list ap;
+
+	_Static_assert(FX_CHACHA20_KEYSIZE <= FXRNG_HASH_SZ, "");
+
+	FXRNG_RNG_ASSERT(rng);
+
+	fxrng_hash_init(&mix);
+	if (seeded) {
+		fxrng_rng_keystream_internal(&rng->rng_prf, u.root_state,
+		    sizeof(u.root_state));
+		fxrng_hash_update(&mix, u.root_state, sizeof(u.root_state));
+	}
+	fxrng_hash_update(&mix, src, sz);
+
+	va_start(ap, sz);
+	while (true) {
+		src = va_arg(ap, const void *);
+		if (src == NULL)
+			break;
+		sz = va_arg(ap, size_t);
+		fxrng_hash_update(&mix, src, sz);
+	}
+	va_end(ap);
+
+	fxrng_hash_finish(&mix, u.hash_out, sizeof(u.hash_out));
+
+	/*
+	 * Take the first keysize (32) bytes of our digest (64 bytes).  It is
+	 * also possible to just have Blake2 emit fewer bytes, but our wrapper
+	 * API doesn't provide that functionality and there isn't anything
+	 * obviously wrong with emitting more hash bytes.
+	 *
+	 * keysetup does not reset the embedded counter, and we rely on that
+	 * property.
+	 */
+	chacha_keysetup(&rng->rng_prf, u.hash_out, FX_CHACHA20_KEYSIZE * 8);
+
+	/* 'mix' zeroed by fxrng_hash_finish(). */
+	explicit_bzero(u.hash_out, sizeof(u.hash_out));
+
+	FXRNG_RNG_ASSERT(rng);
+}
+
+/*
+ * Directly reseed the root RNG from a first-time entropy source,
+ * incorporating the existing RNG state, called by fxrng_brng_src_reseed.
+ *
+ * The root RNG is locked on entry and locked on return.
+ */
+void
+fxrng_rng_src_reseed(struct fxrng_basic_rng *rng,
+    const struct harvest_event *event)
+{
+	fxrng_rng_reseed_internal(rng, true, &event->he_somecounter,
+	    sizeof(event->he_somecounter), (const void *)event->he_entropy,
+	    (size_t)event->he_size, NULL);
+}
+
+/*
+ * Reseed the root RNG from pooled entropy, incorporating the existing RNG
+ * state, called by fxrng_brng_reseed.
+ *
+ * The root RNG is locked on entry and locked on return.
+ */
+void
+fxrng_rng_reseed(struct fxrng_basic_rng *rng, bool seeded, const void *entr,
+    size_t sz)
+{
+	fxrng_rng_reseed_internal(rng, seeded, entr, sz, NULL);
+}