diff --git a/sys/crypto/ccp/ccp.c b/sys/crypto/ccp/ccp.c
index 7db9a27ab059..c3d40f6e99ac 100644
--- a/sys/crypto/ccp/ccp.c
+++ b/sys/crypto/ccp/ccp.c
@@ -1,789 +1,789 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include "opt_ddb.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef DDB
#include
#endif

#include
#include
#include
#include

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for garbage random_source API, which lacks any
 * context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
	{ 0x15df1022, "AMD CCP-5a" },
};

-static struct random_source random_ccp = {
+static const struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = sglist_append_vmpages(sg, cb->cb_vm_page,
		    cb->cb_vm_page_offset, cb->cb_vm_page_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	return (0);
}

static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	mtx_destroy(&sc->lock);
	return (0);
}
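/*
 * [Editor's note] An illustrative caller of ccp_populate_sglist() above,
 * mapping a flat contiguous buffer.  This hypothetical helper is not part
 * of the diff; it is a minimal sketch using the crypto_buffer fields the
 * CRYPTO_BUF_CONTIG case consumes.
 */
static int
example_map_contig(struct ccp_queue *qp, char *buf, int len)
{
	struct crypto_buffer cb;

	cb.cb_type = CRYPTO_BUF_CONTIG;
	cb.cb_buf = buf;
	cb.cb_buf_len = len;
	/* One sglist entry per physically contiguous run of the buffer. */
	return (ccp_populate_sglist(qp->cq_sg_crp, &cb));
}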
static void
ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen)
{
	union authctx auth_ctx;
	const struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

static bool
ccp_aes_check_keylen(int alg, int klen)
{
	switch (klen * 8) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (false);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	if (alg == CRYPTO_AES_XTS)
		kbits = (klen / 2) * 8;
	else
		kbits = klen * 8;
	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

static bool
ccp_auth_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_SHA) == 0)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
ccp_cipher_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

	if ((sc->hw_features & VERSION_CAP_AES) == 0)
		return (false);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}
	return (ccp_aes_check_keylen(csp->csp_cipher_alg,
	    csp->csp_cipher_klen));
}

static int
ccp_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;

	if (csp->csp_flags != 0)
		return (EINVAL);
	sc = device_get_softc(dev);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!ccp_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if ((sc->hw_features & VERSION_CAP_AES) == 0)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!ccp_auth_supported(sc, csp) ||
		    !ccp_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}
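/*
 * [Editor's note] ccp_init_hmac_digest() above precomputes the two padded
 * keys of the standard HMAC construction, HMAC(K, m) =
 * H((K ^ opad) || H((K ^ ipad) || m)).  A minimal software sketch of how
 * the pads would be consumed (hypothetical helper, not driver code; the
 * CCP engine performs this step in hardware):
 */
static void
example_hmac(const struct auth_hash *axf, struct ccp_session *s,
    const void *msg, u_int len, uint8_t *digest)
{
	union authctx ctx;

	axf->Init(&ctx);
	axf->Update(&ctx, s->hmac.ipad, axf->blocksize);	/* inner pad */
	axf->Update(&ctx, msg, len);
	axf->Final(digest, &ctx);				/* inner hash */

	axf->Init(&ctx);
	axf->Update(&ctx, s->hmac.opad, axf->blocksize);	/* outer pad */
	axf->Update(&ctx, digest, axf->hashsize);
	axf->Final(digest, &ctx);				/* HMAC result */
}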
static int
ccp_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	const struct auth_hash *auth_hash;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode;
	unsigned q;

	/* XXX reconcile auth_mode with use by ccp_sha */
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_hash = &auth_hash_hmac_sha1;
		auth_mode = SHA1;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_hash = &auth_hash_hmac_sha2_256;
		auth_mode = SHA2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_hash = &auth_hash_hmac_sha2_384;
		auth_mode = SHA2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		auth_hash = &auth_hash_hmac_sha2_512;
		auth_mode = SHA2_512;
		break;
	default:
		auth_hash = NULL;
		auth_mode = 0;
		break;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		cipher_mode = CCP_AES_MODE_CBC;
		break;
	case CRYPTO_AES_ICM:
		cipher_mode = CCP_AES_MODE_CTR;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		cipher_mode = CCP_AES_MODE_GCTR;
		break;
	case CRYPTO_AES_XTS:
		cipher_mode = CCP_AES_MODE_XTS;
		break;
	default:
		cipher_mode = CCP_AES_MODE_ECB;
		break;
	}

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}

	s = crypto_get_driver_session(cses);

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
		s->mode = GCM;
		break;
	case CSP_MODE_ETA:
		s->mode = AUTHENC;
		break;
	case CSP_MODE_DIGEST:
		s->mode = HMAC;
		break;
	case CSP_MODE_CIPHER:
		s->mode = BLKCIPHER;
		break;
	}

	if (s->mode == GCM) {
		if (csp->csp_auth_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = csp->csp_auth_mlen;
	} else if (auth_hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		if (csp->csp_auth_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = csp->csp_auth_mlen;
		ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen);
	}
	if (cipher_mode != CCP_AES_MODE_ECB) {
		s->blkcipher.cipher_mode = cipher_mode;
		if (csp->csp_cipher_key != NULL)
			ccp_aes_setkey(s, csp->csp_cipher_alg,
			    csp->csp_cipher_key, csp->csp_cipher_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	return (0);
}

static void
ccp_freesession(device_t dev, crypto_session_t cses)
{
	struct ccp_session *s;

	s = crypto_get_driver_session(cses);

	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
	s->active = false;
}
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;

	csp = crypto_get_params(crp->crp_session);
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf);
	if (error != 0)
		goto out;

	if (crp->crp_auth_key != NULL) {
		KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC"));
		ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen);
	}
	if (crp->crp_cipher_key != NULL)
		ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	switch (s->mode) {
	case HMAC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_authenc(qp, s, crp);
		break;
	case GCM:
		if (s->pending != 0) {
			error = EAGAIN;
			break;
		}
		error = ccp_gcm(qp, s, crp);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_probesession, ccp_probesession),
	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

DRIVER_MODULE(ccp, pci, ccp_driver, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
#if 0
/* There are enough known issues that we shouldn't load automatically */
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, nitems(ccp_ids));
#endif

static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}
void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(devclass_find("ccp"), unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */
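/*
 * [Editor's note] The pattern this whole commit enables, sketched as a
 * hypothetical minimal driver (not part of the diff): because
 * random_source_register()/random_source_deregister() now take a pointer
 * to const, a source descriptor can live in read-only data.  The source
 * ID shown is a placeholder for illustration.
 */
static u_int example_rng_read(void *buf, u_int c);

static const struct random_source random_example = {
	.rs_ident = "Example RNG",
	.rs_source = RANDOM_PURE_CCP,	/* placeholder source ID */
	.rs_read = example_rng_read,
};

/* On attach: random_source_register(&random_example);
 * on detach: random_source_deregister(&random_example). */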
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index 5d1bdac14981..af89c697f45d 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -1,254 +1,254 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021, Adrian Chadd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for Qualcomm MSM entropy device. */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

struct qcom_rnd_softc {
	device_t		dev;
	int			reg_rid;
	struct resource		*reg;
};

static int qcom_rnd_modevent(module_t, int, void *);
static int qcom_rnd_probe(device_t);
static int qcom_rnd_attach(device_t);
static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);

-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
	.rs_ident = "Qualcomm Entropy Adapter",
	.rs_source = RANDOM_PURE_QUALCOMM,
	.rs_read = qcom_rnd_read,
};

/* Kludge for API limitations of random(4). */
static _Atomic(struct qcom_rnd_softc *) g_qcom_rnd_softc;

static int
qcom_rnd_modevent(module_t mod, int type, void *unused)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
qcom_rnd_probe(device_t dev)
{
	if (! ofw_bus_status_okay(dev)) {
		return (ENXIO);
	}

	if (ofw_bus_is_compatible(dev, "qcom,prng") == 0) {
		return (ENXIO);
	}

	return (0);
}

static int
qcom_rnd_attach(device_t dev)
{
	struct qcom_rnd_softc *sc, *exp;
	uint32_t reg;

	sc = device_get_softc(dev);

	/* Found a compatible device! */
	sc->dev = dev;

	exp = NULL;
	if (!atomic_compare_exchange_strong_explicit(&g_qcom_rnd_softc, &exp,
	    sc, memory_order_release, memory_order_acquire)) {
		return (ENXIO);
	}

	sc->reg_rid = 0;
	sc->reg = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY,
	    &sc->reg_rid, 0x140, RF_ACTIVE);
	if (sc->reg == NULL) {
		device_printf(dev, "Couldn't allocate memory resource!\n");
		return (ENXIO);
	}

	device_set_desc(dev, "Qualcomm PRNG");

	/*
	 * Check to see whether the PRNG has already been setup or not.
	 */
	bus_barrier(sc->reg, 0, 0x120, BUS_SPACE_BARRIER_READ);
	reg = bus_read_4(sc->reg, QCOM_RND_PRNG_CONFIG);
	if (reg & QCOM_RND_PRNG_CONFIG_HW_ENABLE) {
		device_printf(dev, "PRNG HW already enabled\n");
	} else {
		/*
		 * Do PRNG setup and then enable it.
		 */
		reg = bus_read_4(sc->reg, QCOM_RND_PRNG_LFSR_CFG);
		reg &= QCOM_RND_PRNG_LFSR_CFG_MASK;
		reg |= QCOM_RND_PRNG_LFSR_CFG_CLOCKS;
		bus_write_4(sc->reg, QCOM_RND_PRNG_LFSR_CFG, reg);
		bus_barrier(sc->reg, 0, 0x120, BUS_SPACE_BARRIER_WRITE);

		reg = bus_read_4(sc->reg, QCOM_RND_PRNG_CONFIG);
		reg |= QCOM_RND_PRNG_CONFIG_HW_ENABLE;
		bus_write_4(sc->reg, QCOM_RND_PRNG_CONFIG, reg);
		bus_barrier(sc->reg, 0, 0x120, BUS_SPACE_BARRIER_WRITE);
	}

	random_source_register(&random_qcom_rnd);

	return (0);
}

static int
qcom_rnd_detach(device_t dev)
{
	struct qcom_rnd_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(
	    atomic_load_explicit(&g_qcom_rnd_softc, memory_order_acquire) ==
	    sc, ("only one global instance at a time"));

	random_source_deregister(&random_qcom_rnd);
	if (sc->reg != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->reg_rid,
		    sc->reg);
	}
	atomic_store_explicit(&g_qcom_rnd_softc, NULL, memory_order_release);

	return (0);
}
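/*
 * [Editor's note] The "kludge" above is a lock-free singleton claim: the
 * first attaching instance publishes itself with a compare-and-swap, so a
 * second instance fails attach instead of clobbering the pointer that
 * qcom_rnd_read() later dereferences without any context argument.  The
 * same pattern in isolation (hypothetical names, standalone sketch):
 */
#include <stdatomic.h>

static _Atomic(void *) g_singleton;

static int
example_claim(void *sc)
{
	void *expected = NULL;

	/* Succeeds for exactly one caller; losers see expected != NULL. */
	return (atomic_compare_exchange_strong(&g_singleton, &expected, sc));
}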
static int
qcom_rnd_harvest(struct qcom_rnd_softc *sc, void *buf, size_t *sz)
{
	/*
	 * Add data to buf until we either run out of entropy or we
	 * fill the buffer.
	 *
	 * Note - be mindful of the provided buffer size; we're reading
	 * 4 bytes at a time but we only want to supply up to the max
	 * buffer size, so don't write past it!
	 */
	size_t rz = 0;
	uint32_t reg;

	while (rz < *sz) {
		bus_barrier(sc->reg, 0, 0x120, BUS_SPACE_BARRIER_READ);
		reg = bus_read_4(sc->reg, QCOM_RND_PRNG_STATUS);
		if ((reg & QCOM_RND_PRNG_STATUS_DATA_AVAIL) == 0)
			break;
		reg = bus_read_4(sc->reg, QCOM_RND_PRNG_DATA_OUT);
		memcpy(((char *) buf) + rz, &reg, sizeof(uint32_t));
		rz += sizeof(uint32_t);
	}

	if (rz == 0)
		return (EAGAIN);
	*sz = rz;
	return (0);
}

static unsigned
qcom_rnd_read(void *buf, unsigned usz)
{
	struct qcom_rnd_softc *sc;
	size_t sz;
	int error;

	sc = g_qcom_rnd_softc;
	if (sc == NULL)
		return (0);

	sz = usz;
	error = qcom_rnd_harvest(sc, buf, &sz);
	if (error != 0)
		return (0);

	return (sz);
}

static device_method_t qcom_rnd_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		qcom_rnd_probe),
	DEVMETHOD(device_attach,	qcom_rnd_attach),
	DEVMETHOD(device_detach,	qcom_rnd_detach),
	DEVMETHOD_END
};

static driver_t qcom_rnd_driver = {
	"qcom_rnd",
	qcom_rnd_methods,
	sizeof(struct qcom_rnd_softc)
};

DRIVER_MODULE(qcom_rnd_random, simplebus, qcom_rnd_driver,
    qcom_rnd_modevent, 0);
DRIVER_MODULE(qcom_rnd_random, ofwbus, qcom_rnd_driver, qcom_rnd_modevent, 0);
MODULE_DEPEND(qcom_rnd_random, random_device, 1, 1, 1);
MODULE_VERSION(qcom_rnd_random, 1);
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -1,135 +1,135 @@
/*-
 * Copyright (c) 2022 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static u_int random_rndr_read(void *, u_int);

static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
	.rs_ident = "Armv8 rndr RNG",
	.rs_source = RANDOM_PURE_ARMV8,
	.rs_read = random_rndr_read,
};

static inline int
random_rndr_read_one(u_long *buf)
{
	u_long val;
	int loop, ret;

	loop = 10;
	do {
		__asm __volatile(
		    /* Read the random number */
		    "mrs	%0, " __XSTRING(RNDRRS_REG) "\n"
		    /* 1 on success, 0 on failure */
		    "cset	%w1, ne\n"
		    : "=&r" (val), "=&r"(ret) :: "cc");
	} while (ret == 0 && --loop > 0);

	if (ret != 0)
		*buf = val;

	return (ret);
}

static u_int
random_rndr_read(void *buf, u_int c)
{
	u_long *b;
	u_int count;

	b = buf;
	for (count = 0; count < c; count += sizeof(*b)) {
		if (!random_rndr_read_one(b))
			break;

		b++;
	}

	return (count);
}

static int
rndr_modevent(module_t mod, int type, void *unused)
{
	uint64_t reg;
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		has_rndr = false;
		if (get_kernel_reg(ID_AA64ISAR0_EL1, &reg) &&
		    ID_AA64ISAR0_RNDR_VAL(reg) != ID_AA64ISAR0_RNDR_NONE) {
			has_rndr = true;
			random_source_register(&random_armv8_rndr);
			printf("random: fast provider: \"%s\"\n",
			    random_armv8_rndr.rs_ident);
		}
		break;
	case MOD_UNLOAD:
		if (has_rndr)
			random_source_deregister(&random_armv8_rndr);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static moduledata_t rndr_mod = {
	"rndr",
	rndr_modevent,
	0
};

DECLARE_MODULE(rndr, rndr_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH);
MODULE_VERSION(rndr, 1);
MODULE_DEPEND(rndr, random_harvestq, 1, 1, 1);
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index 4db1718db8f3..a23fdf0343d3 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -1,146 +1,146 @@
/*-
 * Copyright (c) 2018 Justin Hibbits
 * Copyright (c) 2013 The FreeBSD Foundation
 * Copyright (c) 2013 David E. O'Brien
 * Copyright (c) 2012 Konstantin Belousov
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Power ISA 3.0 adds a "darn" instruction (Deliver A Random Number).  The RNG
 * backing this instruction conforms to NIST SP800-90B and SP800-90C at the
 * point of hardware design, and provides a minimum of 0.5 bits of entropy per
 * bit.
 */

#define	RETRY_COUNT	10

static u_int random_darn_read(void *, u_int);

-static struct random_source random_darn = {
+static const struct random_source random_darn = {
	.rs_ident = "PowerISA DARN random number generator",
	.rs_source = RANDOM_PURE_DARN,
	.rs_read = random_darn_read
};

static inline int
darn_rng_store(u_long *buf)
{
	u_long rndval;
	int retry;

	for (retry = RETRY_COUNT; retry > 0; --retry) {
		/* "DARN %rN, 1" instruction */
		/*
		 * Arguments for DARN: rN and "L", where "L" can be one of:
		 * 0 - 32-bit conditional random number
		 * 1 - Conditional random number (conditioned to remove bias)
		 * 2 - Raw random number (unprocessed, may include bias)
		 * 3 - Reserved
		 */
		__asm __volatile(".long 0x7c0105e6 | (%0 << 21)" :
		    "+r"(rndval));
		if (rndval != ~0)
			break;
	}

	*buf = rndval;
	return (retry);
}

/* It is required that buf length is a multiple of sizeof(u_long). */
static u_int
random_darn_read(void *buf, u_int c)
{
	u_long *b, rndval;
	u_int count;

	KASSERT(c % sizeof(*b) == 0, ("partial read %d", c));
	b = buf;
	for (count = c; count > 0; count -= sizeof(*b)) {
		if (darn_rng_store(&rndval) == 0)
			break;
		*b++ = rndval;
	}

	return (c - count);
}

static int
darn_modevent(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		if (cpu_features2 & PPC_FEATURE2_DARN) {
			random_source_register(&random_darn);
			printf("random: fast provider: \"%s\"\n",
			    random_darn.rs_ident);
		}
		break;
	case MOD_UNLOAD:
		if (cpu_features2 & PPC_FEATURE2_DARN)
			random_source_deregister(&random_darn);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

static moduledata_t darn_mod = {
	"darn",
	darn_modevent,
	0
};

DECLARE_MODULE(darn, darn_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH);
MODULE_VERSION(darn, 1);
MODULE_DEPEND(darn, random_harvestq, 1, 1, 1);
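/*
 * [Editor's note] On the ".long 0x7c0105e6" template above: 0x7c0105e6 is
 * the DARN opcode with L = 1 (conditioned 64-bit result) already encoded,
 * and the destination register number is OR'd into bits 21-25.  A sketch
 * of how such a word decomposes into its Power ISA fields (illustrative
 * arithmetic only, not driver code):
 */
#include <stdint.h>

static uint32_t
example_darn_opcode(unsigned rt, unsigned l)
{
	/* Primary opcode 31, extended opcode 755 (DARN), per Power ISA 3.0. */
	return ((31u << 26) | (rt << 21) | (l << 16) | (755u << 1));
}
/* example_darn_opcode(0, 1) == 0x7c0105e6, matching the inline template. */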
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 114f6e3fe277..4614ccb32ebe 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -1,197 +1,197 @@
/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * Copyright (c) 2013 David E. O'Brien
 * Copyright (c) 2012 Konstantin Belousov
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define	RETRY_COUNT	10

static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);

-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
	.rs_ident = "Intel Secure Key RNG",
	.rs_source = RANDOM_PURE_RDRAND,
	.rs_read = random_ivy_read
};

SYSCTL_NODE(_kern_random, OID_AUTO, rdrand, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "rdrand (ivy) entropy source");
static bool acquire_independent_seed_samples = false;
SYSCTL_BOOL(_kern_random_rdrand, OID_AUTO, rdrand_independent_seed,
    CTLFLAG_RWTUN, &acquire_independent_seed_samples, 0,
    "If non-zero, use more expensive and slow, but safer, seeded samples "
    "where RDSEED is not present.");

static bool
x86_rdrand_store(u_long *buf)
{
	u_long rndval, seed_iterations, i;
	int retry;

	/* Per [1], "§ 5.2.6 Generating Seeds from RDRAND,"
	 * machines lacking RDSEED will guarantee RDRAND is reseeded every 8kB
	 * of generated output.
	 *
	 * [1]: https://software.intel.com/en-us/articles/intel-digital-random-number-generator-drng-software-implementation-guide#inpage-nav-6-8
	 */
	if (acquire_independent_seed_samples)
		seed_iterations = 8 * 1024 / sizeof(*buf);
	else
		seed_iterations = 1;

	for (i = 0; i < seed_iterations; i++) {
		retry = RETRY_COUNT;
		__asm __volatile(
		    "1:\n\t"
		    "rdrand	%1\n\t"	/* read randomness into rndval */
		    "jc	2f\n\t"		/* CF is set on success, exit retry loop */
		    "dec	%0\n\t"	/* otherwise, retry-- */
		    "jne	1b\n\t"	/* and loop if retries are not exhausted */
		    "2:"
		    : "+r" (retry), "=r" (rndval) : : "cc");
		if (retry == 0)
			return (false);
	}
	*buf = rndval;
	return (true);
}

static bool
x86_rdseed_store(u_long *buf)
{
	u_long rndval;
	int retry;

	retry = RETRY_COUNT;
	__asm __volatile(
	    "1:\n\t"
	    "rdseed	%1\n\t"	/* read randomness into rndval */
	    "jc	2f\n\t"		/* CF is set on success, exit retry loop */
	    "dec	%0\n\t"	/* otherwise, retry-- */
	    "jne	1b\n\t"	/* and loop if retries are not exhausted */
	    "2:"
	    : "+r" (retry), "=r" (rndval) : : "cc");
	*buf = rndval;
	return (retry != 0);
}

static bool
x86_unimpl_store(u_long *buf __unused)
{

	panic("%s called", __func__);
}

DEFINE_IFUNC(static, bool, x86_rng_store, (u_long *buf))
{
	has_rdrand = (cpu_feature2 & CPUID2_RDRAND);
	has_rdseed = (cpu_stdext_feature & CPUID_STDEXT_RDSEED);

	if (has_rdseed)
		return (x86_rdseed_store);
	else if (has_rdrand)
		return (x86_rdrand_store);
	else
		return (x86_unimpl_store);
}
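/*
 * [Editor's note] The arithmetic behind seed_iterations in
 * x86_rdrand_store() above: RDRAND is guaranteed to be reseeded at least
 * once per 8 kB of output, so discarding a full 8 kB of words between
 * kept samples yields values separated by a reseed.  On LP64 that is
 * 8 * 1024 / sizeof(u_long) = 8192 / 8 = 1024 RDRAND executions per kept
 * word, i.e. a roughly 1024x slowdown, which is why the sysctl defaults
 * to off.  (Worked restatement of the driver's own computation.)
 */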
/* It is required that buf length is a multiple of sizeof(u_long). */
static u_int
random_ivy_read(void *buf, u_int c)
{
	u_long *b, rndval;
	u_int count;

	KASSERT(c % sizeof(*b) == 0, ("partial read %d", c));
	b = buf;
	for (count = c; count > 0; count -= sizeof(*b)) {
		if (!x86_rng_store(&rndval))
			break;
		*b++ = rndval;
	}
	return (c - count);
}

static int
rdrand_modevent(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		if (has_rdrand || has_rdseed) {
			random_source_register(&random_ivy);
			printf("random: fast provider: \"%s\"\n",
			    random_ivy.rs_ident);
		}
		break;
	case MOD_UNLOAD:
		if (has_rdrand || has_rdseed)
			random_source_deregister(&random_ivy);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static moduledata_t rdrand_mod = {
	"rdrand",
	rdrand_modevent,
	0
};

DECLARE_MODULE(rdrand, rdrand_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH);
MODULE_VERSION(rdrand, 1);
MODULE_DEPEND(rdrand, random_harvestq, 1, 1, 1);
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index e01fdb952108..ddda959c8fd2 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -1,133 +1,133 @@
/*-
 * Copyright (c) 2013-2015 Mark R V Murray
 * Copyright (c) 2013 David E. O'Brien
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static u_int random_nehemiah_read(void *, u_int);

-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
	.rs_ident = "VIA Nehemiah Padlock RNG",
	.rs_source = RANDOM_PURE_NEHEMIAH,
	.rs_read = random_nehemiah_read
};

/* This H/W source never stores more than 8 bytes in one go */
/* ARGSUSED */
static __inline size_t
VIA_RNG_store(void *buf)
{
	uint32_t retval = 0;
	uint32_t rate = 0;

	__asm __volatile(
	    "movl	$0,%%edx\n\t"
	    ".byte 0x0f, 0xa7, 0xc0"
	    : "=a" (retval), "+d" (rate), "+D" (buf)
	    :
	    : "memory");
	if (rate == 0)
		return (retval & 0x1f);
	return (0);
}

/* It is specifically allowed that buf is a multiple of sizeof(long) */
static u_int
random_nehemiah_read(void *buf, u_int c)
{
	uint8_t *b;
	size_t count, ret;
	uint64_t tmp;

	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
	b = buf;
	for (count = c; count > 0; count -= ret) {
		ret = MIN(VIA_RNG_store(&tmp), count);
		memcpy(b, &tmp, ret);
		b += ret;
	}
	fpu_kern_leave(curthread, NULL);

	return (c);
}

static int
nehemiah_modevent(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		if (via_feature_rng & VIA_HAS_RNG) {
			random_source_register(&random_nehemiah);
			printf("random: fast provider: \"%s\"\n",
			    random_nehemiah.rs_ident);
		}
		break;
	case MOD_UNLOAD:
		if (via_feature_rng & VIA_HAS_RNG) {
			random_source_deregister(&random_nehemiah);
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

static moduledata_t nehemiah_mod = {
	"nehemiah",
	nehemiah_modevent,
	0
};

DECLARE_MODULE(nehemiah, nehemiah_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH);
MODULE_VERSION(nehemiah, 1);
MODULE_DEPEND(nehemiah, random_harvestq, 1, 1, 1);
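/*
 * [Editor's note] The fpu_kern_enter()/fpu_kern_leave() bracket in
 * random_nehemiah_read() above is the standard idiom for kernel code that
 * needs the FPU/SIMD state usable (the VIA "xstore" instruction depends
 * on SSE being enabled).  FPU_KERN_NOCTX avoids allocating a save area,
 * which in turn means no sleeping inside the bracket.  Generic shape of
 * the pattern, using the same flags the driver uses:
 *
 *	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
 *	... short, non-sleeping FPU/SIMD work ...
 *	fpu_kern_leave(curthread, NULL);
 */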
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index f38fd8e92c36..b1ff99b36720 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -1,691 +1,691 @@
/*-
 * Copyright (c) 2017 Oliver Pinter
 * Copyright (c) 2017 W. Dean Freeman
 * Copyright (c) 2000-2015 Mark R V Murray
 * Copyright (c) 2013 Arthur Mesh
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if defined(RANDOM_ENABLE_ETHER)
#define _RANDOM_HARVEST_ETHER_OFF 0
#else
#define _RANDOM_HARVEST_ETHER_OFF (1u << RANDOM_NET_ETHER)
#endif
#if defined(RANDOM_ENABLE_UMA)
#define _RANDOM_HARVEST_UMA_OFF 0
#else
#define _RANDOM_HARVEST_UMA_OFF (1u << RANDOM_UMA)
#endif

/*
 * Note that random_sources_feed() will also use this to try and split up
 * entropy into a subset of pools per iteration with the goal of feeding
 * HARVESTSIZE into every pool at least once per second.
 */
#define	RANDOM_KTHREAD_HZ	10

static void random_kthread(void);
static void random_sources_feed(void);

/*
 * Random must initialize much earlier than epoch, but we can initialize the
 * epoch code before SMP starts.  Prior to SMP, we can safely bypass
 * concurrency primitives.
 */
static __read_mostly bool epoch_inited;
static __read_mostly epoch_t rs_epoch;

/*
 * How many events to queue up.  We create this many items in
 * an 'empty' queue, then transfer them to the 'harvest' queue with
 * supplied junk.  When used, they are transferred back to the
 * 'empty' queue.
 */
#define	RANDOM_RING_MAX		1024
#define	RANDOM_ACCUM_MAX	8

/* 1 to let the kernel thread run, 0 to terminate, -1 to mark completion */
volatile int random_kthread_control;

/* Allow the sysadmin to select the broad category of
 * entropy types to harvest.
 */
__read_frequently u_int hc_source_mask;

struct random_sources {
	CK_LIST_ENTRY(random_sources)	rrs_entries;
-	struct random_source		*rrs_source;
+	const struct random_source	*rrs_source;
};

static CK_LIST_HEAD(sources_head, random_sources) source_list =
    CK_LIST_HEAD_INITIALIZER(source_list);

SYSCTL_NODE(_kern_random, OID_AUTO, harvest, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Entropy Device Parameters");

/*
 * Put all the harvest queue context stuff in one place.
 * This makes it a bit easier to lock and protect.
 */
static struct harvest_context {
	/* The harvest mutex protects all of harvest_context and
	 * the related data.
	 */
	struct mtx hc_mtx;
	/* Round-robin destination cache. */
	u_int hc_destination[ENTROPYSOURCE];
	/* The context of the kernel thread processing harvested entropy */
	struct proc *hc_kthread_proc;
	/*
	 * A pair of buffers for queued events.  New events are added to the
	 * active queue while the kthread processes the other one in
	 * parallel.
	 */
	struct entropy_buffer {
		struct harvest_event ring[RANDOM_RING_MAX];
		u_int pos;
	} hc_entropy_buf[2];
	u_int hc_active_buf;
	struct fast_entropy_accumulator {
		volatile u_int pos;
		uint32_t buf[RANDOM_ACCUM_MAX];
	} hc_entropy_fast_accumulator;
} harvest_context;

#define	RANDOM_HARVEST_INIT_LOCK()	mtx_init(&harvest_context.hc_mtx, \
	    "entropy harvest mutex", NULL, MTX_SPIN)
#define	RANDOM_HARVEST_LOCK()		mtx_lock_spin(&harvest_context.hc_mtx)
#define	RANDOM_HARVEST_UNLOCK()		mtx_unlock_spin(&harvest_context.hc_mtx)

static struct kproc_desc random_proc_kp = {
	"rand_harvestq",
	random_kthread,
	&harvest_context.hc_kthread_proc,
};
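/*
 * [Editor's note] hc_entropy_buf[] above is a classic double buffer:
 * producers append to hc_entropy_buf[hc_active_buf] under the spin lock,
 * while the kthread flips hc_active_buf and drains the retired buffer
 * with no lock held.  The swap step in isolation, as implemented by
 * random_kthread() below (annotations in prose, not new logic):
 *
 *	RANDOM_HARVEST_LOCK();
 *	buf = &hc->hc_entropy_buf[hc->hc_active_buf];	<- retire this one
 *	entries = buf->pos;
 *	buf->pos = 0;
 *	hc->hc_active_buf = (hc->hc_active_buf + 1) %
 *	    nitems(hc->hc_entropy_buf);			<- flip
 *	RANDOM_HARVEST_UNLOCK();
 *	... drain buf->ring[0 .. entries-1] lock-free ...
 */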
/* Pass the given event straight through to Fortuna/Whatever. */
static __inline void
random_harvestq_fast_process_event(struct harvest_event *event)
{
	p_random_alg_context->ra_event_processor(event);
	explicit_bzero(event, sizeof(*event));
}

static void
random_kthread(void)
{
	struct harvest_context *hc;

	hc = &harvest_context;
	for (random_kthread_control = 1; random_kthread_control;) {
		struct entropy_buffer *buf;
		u_int entries;

		/* Deal with queued events. */
		RANDOM_HARVEST_LOCK();
		buf = &hc->hc_entropy_buf[hc->hc_active_buf];
		entries = buf->pos;
		buf->pos = 0;
		hc->hc_active_buf = (hc->hc_active_buf + 1) %
		    nitems(hc->hc_entropy_buf);
		RANDOM_HARVEST_UNLOCK();
		for (u_int i = 0; i < entries; i++)
			random_harvestq_fast_process_event(&buf->ring[i]);

		/* Poll sources of noise. */
		random_sources_feed();

		/* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */
		for (u_int i = 0; i < RANDOM_ACCUM_MAX; i++) {
			if (hc->hc_entropy_fast_accumulator.buf[i]) {
				random_harvest_direct(
				    &hc->hc_entropy_fast_accumulator.buf[i],
				    sizeof(hc->hc_entropy_fast_accumulator.buf[0]),
				    RANDOM_UMA);
				hc->hc_entropy_fast_accumulator.buf[i] = 0;
			}
		}
		/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
		tsleep_sbt(&hc->hc_kthread_proc, 0, "-",
		    SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1));
	}
	random_kthread_control = -1;
	wakeup(&hc->hc_kthread_proc);
	kproc_exit(0);
	/* NOTREACHED */
}
SYSINIT(random_device_h_proc, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, kproc_start,
    &random_proc_kp);
_Static_assert(SI_SUB_KICK_SCHEDULER > SI_SUB_RANDOM,
    "random kthread starting before subsystem initialization");

static void
rs_epoch_init(void *dummy __unused)
{
	rs_epoch = epoch_alloc("Random Sources", EPOCH_PREEMPT);
	epoch_inited = true;
}
SYSINIT(rs_epoch_init, SI_SUB_EPOCH, SI_ORDER_ANY, rs_epoch_init, NULL);
/*
 * Run through all fast sources reading entropy for the given
 * number of rounds, which should be a multiple of the number
 * of entropy accumulation pools in use; it is 32 for Fortuna.
 */
static void
random_sources_feed(void)
{
	uint32_t entropy[HARVESTSIZE];
	struct epoch_tracker et;
	struct random_sources *rrs;
	u_int i, n, npools;
	bool rse_warm;

	rse_warm = epoch_inited;

	/*
	 * Evenly-ish distribute pool population across the second based on how
	 * frequently random_kthread iterates.
	 *
	 * For Fortuna, the math currently works out as such:
	 * 64 bits * 4 pools = 256 bits per iteration
	 * 256 bits * 10 Hz = 2560 bits per second, 320 B/s
	 */
	npools = howmany(p_random_alg_context->ra_poolcount, RANDOM_KTHREAD_HZ);

	/*-
	 * If we're not seeded yet, attempt to perform a "full seed", filling
	 * all of the PRNG's pools with entropy; if there is enough entropy
	 * available from "fast" entropy sources this will allow us to finish
	 * seeding and unblock the boot process immediately rather than being
	 * stuck for a few seconds with random_kthread gradually collecting a
	 * small chunk of entropy every 1 / RANDOM_KTHREAD_HZ seconds.
	 *
	 * We collect RANDOM_FORTUNA_DEFPOOLSIZE bytes per pool, i.e. enough
	 * to fill Fortuna's pools in the default configuration.  With another
	 * PRNG or smaller pools for Fortuna, we might collect more entropy
	 * than needed to fill the pools, but this is harmless; alternatively,
	 * a different PRNG, larger pools, or fast entropy sources which are
	 * not able to provide as much entropy as we request may result in the
	 * PRNG not being fully seeded (and thus remaining blocked), but in
	 * that case we will return here after 1 / RANDOM_KTHREAD_HZ seconds
	 * and try again for a large amount of entropy.
	 */
	if (!p_random_alg_context->ra_seeded())
		npools = howmany(p_random_alg_context->ra_poolcount *
		    RANDOM_FORTUNA_DEFPOOLSIZE, sizeof(entropy));

	/*
	 * Step over all of live entropy sources, and feed their output
	 * to the system-wide RNG.
	 */
	if (rse_warm)
		epoch_enter_preempt(rs_epoch, &et);
	CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
		for (i = 0; i < npools; i++) {
			n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
			KASSERT((n <= sizeof(entropy)),
			    ("%s: rs_read returned too much data (%u > %zu)",
			    __func__, n, sizeof(entropy)));
			/*
			 * Sometimes the HW entropy source doesn't have anything
			 * ready for us.  This isn't necessarily untrustworthy.
			 * We don't perform any other verification of an entropy
			 * source (i.e., length is allowed to be anywhere from 1
			 * to sizeof(entropy), quality is unchecked, etc), so
			 * don't balk verbosely at slow random sources either.
			 * There are reports that RDSEED on x86 metal falls
			 * behind the rate at which we query it, for example.
			 * But it's still a better entropy source than RDRAND.
			 */
			if (n == 0)
				continue;
			random_harvest_direct(entropy, n,
			    rrs->rrs_source->rs_source);
		}
	}
	if (rse_warm)
		epoch_exit_preempt(rs_epoch, &et);
	explicit_bzero(entropy, sizeof(entropy));
}
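/*
 * [Editor's note] Unpacking the feed-rate comment in random_sources_feed()
 * above:
 *
 *	256 bits/iteration * RANDOM_KTHREAD_HZ (10) iterations/s
 *	    = 2560 bits/s
 *	2560 / 8 = 320 bytes/s
 *
 * i.e. once the PRNG is seeded, each registered fast source is polled for
 * roughly 320 bytes per second, spread evenly across the kthread wakeups.
 */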
"1" : "0"); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); } return (error); } SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_bin, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_print_harvestmask, "A", "Entropy harvesting mask (printable)"); static const char *random_source_descr[ENTROPYSOURCE] = { [RANDOM_CACHED] = "CACHED", [RANDOM_ATTACH] = "ATTACH", [RANDOM_KEYBOARD] = "KEYBOARD", [RANDOM_MOUSE] = "MOUSE", [RANDOM_NET_TUN] = "NET_TUN", [RANDOM_NET_ETHER] = "NET_ETHER", [RANDOM_NET_NG] = "NET_NG", [RANDOM_INTERRUPT] = "INTERRUPT", [RANDOM_SWI] = "SWI", [RANDOM_FS_ATIME] = "FS_ATIME", [RANDOM_UMA] = "UMA", [RANDOM_CALLOUT] = "CALLOUT", /* ENVIRONMENTAL_END */ [RANDOM_PURE_OCTEON] = "PURE_OCTEON", /* PURE_START */ [RANDOM_PURE_SAFE] = "PURE_SAFE", [RANDOM_PURE_GLXSB] = "PURE_GLXSB", [RANDOM_PURE_HIFN] = "PURE_HIFN", [RANDOM_PURE_RDRAND] = "PURE_RDRAND", [RANDOM_PURE_NEHEMIAH] = "PURE_NEHEMIAH", [RANDOM_PURE_RNDTEST] = "PURE_RNDTEST", [RANDOM_PURE_VIRTIO] = "PURE_VIRTIO", [RANDOM_PURE_BROADCOM] = "PURE_BROADCOM", [RANDOM_PURE_CCP] = "PURE_CCP", [RANDOM_PURE_DARN] = "PURE_DARN", [RANDOM_PURE_TPM] = "PURE_TPM", [RANDOM_PURE_VMGENID] = "PURE_VMGENID", [RANDOM_PURE_QUALCOMM] = "PURE_QUALCOMM", [RANDOM_PURE_ARMV8] = "PURE_ARMV8", /* "ENTROPYSOURCE" */ }; static int random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; int error, i; bool first; first = true; error = sysctl_wire_old_buffer(req, 0); if (error == 0) { sbuf_new_for_sysctl(&sbuf, NULL, 128, req); for (i = ENTROPYSOURCE - 1; i >= 0; i--) { if (i >= RANDOM_PURE_START && (hc_source_mask & (1 << i)) == 0) continue; if (!first) sbuf_cat(&sbuf, ","); sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "[" : ""); sbuf_cat(&sbuf, random_source_descr[i]); sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "]" : ""); first = false; } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); } return (error); } SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_symbolic, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_print_harvestmask_symbolic, "A", "Entropy harvesting mask (symbolic)"); static void random_harvestq_init(void *unused __unused) { static const u_int almost_everything_mask = (((1 << (RANDOM_ENVIRONMENTAL_END + 1)) - 1) & ~_RANDOM_HARVEST_ETHER_OFF & ~_RANDOM_HARVEST_UMA_OFF); hc_source_mask = almost_everything_mask; RANDOM_HARVEST_INIT_LOCK(); harvest_context.hc_active_buf = 0; } SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL); /* * Subroutine to slice up a contiguous chunk of 'entropy' and feed it into the * underlying algorithm. Returns number of bytes actually fed into underlying * algorithm. */ static size_t random_early_prime(char *entropy, size_t len) { struct harvest_event event; size_t i; len = rounddown(len, sizeof(event.he_entropy)); if (len == 0) return (0); for (i = 0; i < len; i += sizeof(event.he_entropy)) { event.he_somecounter = random_get_cyclecount(); event.he_size = sizeof(event.he_entropy); event.he_source = RANDOM_CACHED; event.he_destination = harvest_context.hc_destination[RANDOM_CACHED]++; memcpy(event.he_entropy, entropy + i, sizeof(event.he_entropy)); random_harvestq_fast_process_event(&event); } explicit_bzero(entropy, len); return (len); } /* * Subroutine to search for known loader-loaded files in memory and feed them * into the underlying algorithm early in boot. Returns the number of bytes * loaded (zero if none were loaded). 
*/ static size_t random_prime_loader_file(const char *type) { uint8_t *keyfile, *data; size_t size; keyfile = preload_search_by_type(type); if (keyfile == NULL) return (0); data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data == NULL) return (0); return (random_early_prime(data, size)); } /* * This is used to prime the RNG by grabbing any early random stuff * known to the kernel, and inserting it directly into the hashing * module, currently Fortuna. */ static void random_harvestq_prime(void *unused __unused) { size_t size; /* * Get entropy that may have been preloaded by loader(8) * and use it to pre-charge the entropy harvest queue. */ size = random_prime_loader_file(RANDOM_CACHED_BOOT_ENTROPY_MODULE); if (bootverbose) { if (size > 0) printf("random: read %zu bytes from preloaded cache\n", size); else printf("random: no preloaded entropy cache\n"); } size = random_prime_loader_file(RANDOM_PLATFORM_BOOT_ENTROPY_MODULE); if (bootverbose) { if (size > 0) printf("random: read %zu bytes from platform bootloader\n", size); else printf("random: no platform bootloader entropy\n"); } } SYSINIT(random_device_prime, SI_SUB_RANDOM, SI_ORDER_MIDDLE, random_harvestq_prime, NULL); static void random_harvestq_deinit(void *unused __unused) { /* Command the hash/reseed thread to end and wait for it to finish */ random_kthread_control = 0; while (random_kthread_control >= 0) tsleep(&harvest_context.hc_kthread_proc, 0, "harvqterm", hz/5); } SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_deinit, NULL); /*- * Entropy harvesting queue routine. * * This is supposed to be fast; do not do anything slow in here! * It is also illegal (and morally reprehensible) to insert any * high-rate data here. "High-rate" is defined as a data source * that is likely to fill up the buffer in much less than 100ms. * This includes the "always-on" sources like the Intel "rdrand" * or the VIA Nehamiah "xstore" sources. */ /* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle * counters are built in, but on older hardware it will do a real time clock * read which can be quite expensive. */ void random_harvest_queue_(const void *entropy, u_int size, enum random_entropy_source origin) { struct harvest_context *hc; struct entropy_buffer *buf; struct harvest_event *event; KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid", __func__, origin)); hc = &harvest_context; RANDOM_HARVEST_LOCK(); buf = &hc->hc_entropy_buf[hc->hc_active_buf]; if (buf->pos < RANDOM_RING_MAX) { event = &buf->ring[buf->pos++]; event->he_somecounter = random_get_cyclecount(); event->he_source = origin; event->he_destination = hc->hc_destination[origin]++; if (size <= sizeof(event->he_entropy)) { event->he_size = size; memcpy(event->he_entropy, entropy, size); } else { /* Big event, so squash it */ event->he_size = sizeof(event->he_entropy[0]); event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event); } } RANDOM_HARVEST_UNLOCK(); } /*- * Entropy harvesting fast routine. * * This is supposed to be very fast; do not do anything slow in here! * This is the right place for high-rate harvested data. 
/*-
 * Entropy harvesting fast routine.
 *
 * This is supposed to be very fast; do not do anything slow in here!
 * This is the right place for high-rate harvested data.
 */
void
random_harvest_fast_(const void *entropy, u_int size)
{
	u_int pos;

	pos = harvest_context.hc_entropy_fast_accumulator.pos;
	harvest_context.hc_entropy_fast_accumulator.buf[pos] ^=
	    jenkins_hash(entropy, size, random_get_cyclecount());
	harvest_context.hc_entropy_fast_accumulator.pos =
	    (pos + 1) % RANDOM_ACCUM_MAX;
}

/*-
 * Entropy harvesting direct routine.
 *
 * This is not supposed to be fast, but will only be used during
 * (e.g.) booting when initial entropy is being gathered.
 */
void
random_harvest_direct_(const void *entropy, u_int size,
    enum random_entropy_source origin)
{
	struct harvest_event event;

	KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE,
	    ("%s: origin %d invalid\n", __func__, origin));
	size = MIN(size, sizeof(event.he_entropy));
	event.he_somecounter = random_get_cyclecount();
	event.he_size = size;
	event.he_source = origin;
	event.he_destination = harvest_context.hc_destination[origin]++;
	memcpy(event.he_entropy, entropy, size);
	random_harvestq_fast_process_event(&event);
}

void
random_harvest_register_source(enum random_entropy_source source)
{

	hc_source_mask |= (1 << source);
}

void
random_harvest_deregister_source(enum random_entropy_source source)
{

	hc_source_mask &= ~(1 << source);
}

void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
	struct random_sources *rrs;

	KASSERT(rsource != NULL, ("invalid input to %s", __func__));

	rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK);
	rrs->rrs_source = rsource;

	random_harvest_register_source(rsource->rs_source);

	printf("random: registering fast source %s\n", rsource->rs_ident);

	RANDOM_HARVEST_LOCK();
	CK_LIST_INSERT_HEAD(&source_list, rrs, rrs_entries);
	RANDOM_HARVEST_UNLOCK();
}

void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
	struct random_sources *rrs = NULL;

	KASSERT(rsource != NULL, ("invalid input to %s", __func__));

	random_harvest_deregister_source(rsource->rs_source);

	RANDOM_HARVEST_LOCK();
	CK_LIST_FOREACH(rrs, &source_list, rrs_entries)
		if (rrs->rrs_source == rsource) {
			CK_LIST_REMOVE(rrs, rrs_entries);
			break;
		}
	RANDOM_HARVEST_UNLOCK();

	if (rrs != NULL && epoch_inited)
		epoch_wait_preempt(rs_epoch);
	free(rrs, M_ENTROPY);
}

static int
random_source_handler(SYSCTL_HANDLER_ARGS)
{
	struct epoch_tracker et;
	struct random_sources *rrs;
	struct sbuf sbuf;
	int error, count;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	sbuf_new_for_sysctl(&sbuf, NULL, 64, req);
	count = 0;
	epoch_enter_preempt(rs_epoch, &et);
	CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
		sbuf_cat(&sbuf, (count++ ? ",'" : "'"));
		sbuf_cat(&sbuf, rrs->rrs_source->rs_ident);
		sbuf_cat(&sbuf, "'");
	}
	epoch_exit_preempt(rs_epoch, &et);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);

	return (error);
}
SYSCTL_PROC(_kern_random, OID_AUTO, random_sources, CTLTYPE_STRING |
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_source_handler, "A",
    "List of active fast entropy sources.");

MODULE_VERSION(random_harvestq, 1);
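/*
 * [Editor's note] random_source_deregister() above is a textbook
 * epoch-based reclamation sequence, which is what lets
 * random_sources_feed() walk source_list without taking a lock: unlink
 * the node, wait out all in-flight epoch readers, and only then free.
 * The two sides of the idiom, side by side (sketch of logic already
 * present above):
 *
 *	reader:                             writer:
 *	epoch_enter_preempt(ep, &et);       CK_LIST_REMOVE(node, ...);
 *	CK_LIST_FOREACH(...) use(node);     epoch_wait_preempt(ep);
 *	epoch_exit_preempt(ep, &et);        free(node, M_ENTROPY);
 */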
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..6d742447ea8b 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -1,113 +1,113 @@
/*-
 * Copyright (c) 2000-2015 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SYS_DEV_RANDOM_RANDOMDEV_H_INCLUDED
#define	SYS_DEV_RANDOM_RANDOMDEV_H_INCLUDED

#ifdef _KERNEL
/* This header contains only those definitions that are global
 * and non algorithm-specific for the entropy processor
 */

#ifdef SYSCTL_DECL	/* from sysctl.h */
SYSCTL_DECL(_kern_random);
SYSCTL_DECL(_kern_random_initial_seeding);

#define	RANDOM_CHECK_UINT(name, min, max)				\
static int								\
random_check_uint_##name(SYSCTL_HANDLER_ARGS)				\
{									\
	if (oidp->oid_arg1 != NULL) {					\
		if (*(u_int *)(oidp->oid_arg1) <= (min))		\
			*(u_int *)(oidp->oid_arg1) = (min);		\
		else if (*(u_int *)(oidp->oid_arg1) > (max))		\
			*(u_int *)(oidp->oid_arg1) = (max);		\
	}								\
	return (sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, \
	    req));							\
}
#endif /* SYSCTL_DECL */

MALLOC_DECLARE(M_ENTROPY);

extern bool random_bypass_before_seeding;
extern bool read_random_bypassed_before_seeding;
extern bool arc4random_bypassed_before_seeding;
extern bool random_bypass_disable_warnings;

#endif /* _KERNEL */

struct harvest_event;

typedef void random_alg_pre_read_t(void);
typedef void random_alg_read_t(uint8_t *, size_t);
typedef bool random_alg_seeded_t(void);
typedef void random_alg_eventprocessor_t(struct harvest_event *);

typedef u_int random_source_read_t(void *, u_int);

/*
 * Random Algorithm is a processor of randomness for the kernel
 * and for userland.
 */
struct random_algorithm {
	const char			*ra_ident;
	u_int				 ra_poolcount;
	random_alg_pre_read_t		*ra_pre_read;
	random_alg_read_t		*ra_read;
	random_alg_seeded_t		*ra_seeded;
	random_alg_eventprocessor_t	*ra_event_processor;
};

#if defined(RANDOM_LOADABLE)
extern const struct random_algorithm *p_random_alg_context;
#else
extern const struct random_algorithm random_alg_context;
#define	p_random_alg_context (&random_alg_context)
#endif
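/*
 * Example (sketch, not part of this diff): a consumer of this vtable
 * reads through p_random_alg_context, checking seededness first.  The
 * real device code adds blocking and chunking policy on top, so treat
 * this as the shape of a read, not the canonical path:
 *
 *	if (p_random_alg_context->ra_seeded()) {
 *		p_random_alg_context->ra_pre_read();
 *		p_random_alg_context->ra_read(buf, len);
 *	}
 */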
#ifdef _KERNEL
/*
 * Random Source is a source of entropy that can provide
 * specified or approximate amount of entropy immediately
 * upon request.
 */
struct random_source {
	const char			*rs_ident;
	enum random_entropy_source	 rs_source;
	random_source_read_t		*rs_read;
};

-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);

#endif /* _KERNEL */

void randomdev_unblock(void);

#endif /* SYS_DEV_RANDOM_RANDOMDEV_H_INCLUDED */
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index d54e2e6b70d4..4a661ad0c19d 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -1,330 +1,330 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013, Bryan Venteicher
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO entropy device. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/random.h>
#include <sys/stdatomic.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>

struct vtrnd_softc {
	device_t		 vtrnd_dev;
	uint64_t		 vtrnd_features;
	struct virtqueue	*vtrnd_vq;
	eventhandler_tag	 eh;
	bool			 inactive;
	struct sglist		*vtrnd_sg;
	uint32_t		*vtrnd_value;
};

static int	vtrnd_modevent(module_t, int, void *);

static int	vtrnd_probe(device_t);
static int	vtrnd_attach(device_t);
static int	vtrnd_detach(device_t);
static int	vtrnd_shutdown(device_t);

static int	vtrnd_negotiate_features(struct vtrnd_softc *);
static int	vtrnd_setup_features(struct vtrnd_softc *);
static int	vtrnd_alloc_virtqueue(struct vtrnd_softc *);
static int	vtrnd_harvest(struct vtrnd_softc *, void *, size_t *);
static void	vtrnd_enqueue(struct vtrnd_softc *sc);
static unsigned	vtrnd_read(void *, unsigned);

#define	VTRND_FEATURES	0

static struct virtio_feature_desc vtrnd_feature_desc[] = {
	{ 0, NULL }
};

-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
	.rs_ident = "VirtIO Entropy Adapter",
	.rs_source = RANDOM_PURE_VIRTIO,
	.rs_read = vtrnd_read,
};

/* Kludge for API limitations of random(4). */
static _Atomic(struct vtrnd_softc *) g_vtrnd_softc;
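/*
 * Example (sketch, not part of this diff): random_source_read_t takes
 * no context argument, so an rs_read callback has to recover its softc
 * from the atomic global, along the lines of vtrnd_read() below:
 *
 *	struct vtrnd_softc *sc;
 *
 *	sc = atomic_load_explicit(&g_vtrnd_softc, memory_order_acquire);
 *	if (sc == NULL)
 *		return (0);
 */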
/* Device methods. */
static device_method_t vtrnd_methods[] = {
	DEVMETHOD(device_probe,		vtrnd_probe),
	DEVMETHOD(device_attach,	vtrnd_attach),
	DEVMETHOD(device_detach,	vtrnd_detach),
	DEVMETHOD(device_shutdown,	vtrnd_shutdown),

	DEVMETHOD_END
};

static driver_t vtrnd_driver = {
	"vtrnd",
	vtrnd_methods,
	sizeof(struct vtrnd_softc)
};

VIRTIO_DRIVER_MODULE(virtio_random, vtrnd_driver, vtrnd_modevent, NULL);
MODULE_VERSION(virtio_random, 1);
MODULE_DEPEND(virtio_random, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_random, random_device, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_random, VIRTIO_ID_ENTROPY,
    "VirtIO Entropy Adapter");

static int
vtrnd_modevent(module_t mod, int type, void *unused)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtrnd_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_random));
}

static int
vtrnd_attach(device_t dev)
{
	struct vtrnd_softc *sc, *exp;
	size_t len;
	int error;

	sc = device_get_softc(dev);
	sc->vtrnd_dev = dev;
	virtio_set_feature_desc(dev, vtrnd_feature_desc);

	len = sizeof(*sc->vtrnd_value) * HARVESTSIZE;
	sc->vtrnd_value = malloc_aligned(len, len, M_DEVBUF, M_WAITOK);
	sc->vtrnd_sg = sglist_build(sc->vtrnd_value, len, M_WAITOK);

	error = vtrnd_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	error = vtrnd_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	exp = NULL;
	if (!atomic_compare_exchange_strong_explicit(&g_vtrnd_softc, &exp,
	    sc, memory_order_release, memory_order_acquire)) {
		error = EEXIST;
		goto fail;
	}

	sc->eh = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    vtrnd_shutdown, dev, SHUTDOWN_PRI_LAST + 1); /* ??? */
	if (sc->eh == NULL) {
		device_printf(dev, "Shutdown event registration failed\n");
		error = ENXIO;
		goto fail;
	}

	sc->inactive = false;
	random_source_register(&random_vtrnd);

	vtrnd_enqueue(sc);

fail:
	if (error)
		vtrnd_detach(dev);

	return (error);
}

static int
vtrnd_detach(device_t dev)
{
	struct vtrnd_softc *sc;
	uint32_t rdlen;

	sc = device_get_softc(dev);
	KASSERT(
	    atomic_load_explicit(&g_vtrnd_softc, memory_order_acquire) == sc,
	    ("only one global instance at a time"));

	sc->inactive = true;
	if (sc->eh != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->eh);
		sc->eh = NULL;
	}
	random_source_deregister(&random_vtrnd);

	/* clear the queue */
	virtqueue_poll(sc->vtrnd_vq, &rdlen);

	atomic_store_explicit(&g_vtrnd_softc, NULL, memory_order_release);
	sglist_free(sc->vtrnd_sg);
	zfree(sc->vtrnd_value, M_DEVBUF);
	return (0);
}

static int
vtrnd_shutdown(device_t dev)
{
	struct vtrnd_softc *sc;

	sc = device_get_softc(dev);
	sc->inactive = true;

	return (0);
}

static int
vtrnd_negotiate_features(struct vtrnd_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtrnd_dev;
	features = VTRND_FEATURES;

	sc->vtrnd_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}

static int
vtrnd_setup_features(struct vtrnd_softc *sc)
{
	int error;

	error = vtrnd_negotiate_features(sc);
	if (error)
		return (error);

	return (0);
}

static int
vtrnd_alloc_virtqueue(struct vtrnd_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtrnd_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, 0, NULL, sc, &sc->vtrnd_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}
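/*
 * Example (sketch, not part of this diff): vtrnd_enqueue() below posts
 * the pre-built sglist with zero readable and one writable segment, so
 * the host fills sc->vtrnd_value asynchronously and vtrnd_harvest()
 * collects it on a later rs_read call instead of sleeping:
 *
 *	error = virtqueue_enqueue(vq, sc, sc->vtrnd_sg, 0, 1);
 */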
static void
vtrnd_enqueue(struct vtrnd_softc *sc)
{
	struct virtqueue *vq;
	int error __diagused;

	vq = sc->vtrnd_vq;

	KASSERT(virtqueue_empty(vq), ("%s: non-empty queue", __func__));

	error = virtqueue_enqueue(vq, sc, sc->vtrnd_sg, 0, 1);
	KASSERT(error == 0, ("%s: virtqueue_enqueue returned error: %d",
	    __func__, error));

	virtqueue_notify(vq);
}

static int
vtrnd_harvest(struct vtrnd_softc *sc, void *buf, size_t *sz)
{
	struct virtqueue *vq;
	void *cookie;
	uint32_t rdlen;

	if (sc->inactive)
		return (EDEADLK);

	vq = sc->vtrnd_vq;

	cookie = virtqueue_dequeue(vq, &rdlen);
	if (cookie == NULL)
		return (EAGAIN);
	KASSERT(cookie == sc, ("%s: cookie mismatch", __func__));

	*sz = MIN(rdlen, *sz);
	memcpy(buf, sc->vtrnd_value, *sz);

	vtrnd_enqueue(sc);

	return (0);
}

static unsigned
vtrnd_read(void *buf, unsigned usz)
{
	struct vtrnd_softc *sc;
	size_t sz;
	int error;

	sc = g_vtrnd_softc;
	if (sc == NULL)
		return (0);

	sz = usz;
	error = vtrnd_harvest(sc, buf, &sz);
	if (error != 0)
		return (0);

	return (sz);
}
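/*
 * Example (sketch, not part of this diff): on the consumer side, the
 * harvest code walks the registered sources under the rs_epoch section
 * and pulls whatever each rs_read callback can deliver right now; a
 * source such as vtrnd_read() above simply returns 0 when nothing is
 * pending.  Roughly:
 *
 *	struct epoch_tracker et;
 *	struct random_sources *rrs;
 *	uint32_t entropy[HARVESTSIZE];
 *	u_int n;
 *
 *	epoch_enter_preempt(rs_epoch, &et);
 *	CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
 *		n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
 *		if (n > 0)
 *			random_harvest_direct(entropy, n,
 *			    rrs->rrs_source->rs_source);
 *	}
 *	epoch_exit_preempt(rs_epoch, &et);
 */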