Index: head/sys/crypto/armv8/armv8_crypto.c
===================================================================
--- head/sys/crypto/armv8/armv8_crypto.c	(revision 365477)
+++ head/sys/crypto/armv8/armv8_crypto.c	(revision 365478)
@@ -1,383 +1,383 @@
/*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org> * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org> * Copyright (c) 2014,2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is based on the aesni code.
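 *
 * Editor's aside, not part of this commit: the change below makes
 * armv8_crypto_process() report per-request failures only through
 * crp->crp_etype and return 0 to the opencrypto framework.  A minimal
 * userspace model of that calling convention follows; the struct and
 * function names here (cryptop_model, model_process, ...) are
 * hypothetical stand-ins, not the kernel API.
 */

#include <stdio.h>

/* A request carries its own completion status, cf. struct cryptop. */
struct cryptop_model {
	int	crp_etype;	/* per-request error code */
	int	done;		/* set when "crypto_done" runs */
};

static void
model_done(struct cryptop_model *crp)
{
	crp->done = 1;		/* stands in for crypto_done(crp) */
}

/* cf. armv8_crypto_process() after this change: it always returns 0. */
static int
model_process(struct cryptop_model *crp, int cipher_error)
{
	crp->crp_etype = cipher_error;	/* the error travels in the request */
	model_done(crp);		/* the request is always completed */
	return (0);			/* the method itself reports success */
}

int
main(void)
{
	struct cryptop_model ok = { 0, 0 }, bad = { 0, 0 };

	printf("ok:  ret=%d etype=%d\n", model_process(&ok, 0), ok.crp_etype);
	printf("bad: ret=%d etype=%d\n",
	    model_process(&bad, 22), bad.crp_etype);	/* 22 == EINVAL */
	return (0);
}

/*
 * End of aside; the original file resumes.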
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct armv8_crypto_softc { int dieing; int32_t cid; struct rwlock lock; }; static struct mtx *ctx_mtx; static struct fpu_kern_ctx **ctx_vfp; #define AQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_vfp[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int armv8_crypto_cipher_process(struct armv8_crypto_session *, struct cryptop *); MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data"); static void armv8_crypto_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "armv8crypto", -1) == NULL && BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0) panic("ARMv8 crypto: could not attach"); } static int armv8_crypto_probe(device_t dev) { uint64_t reg; int ret = ENXIO; reg = READ_SPECIALREG(id_aa64isar0_el1); switch (ID_AA64ISAR0_AES_VAL(reg)) { case ID_AA64ISAR0_AES_BASE: case ID_AA64ISAR0_AES_PMULL: ret = 0; break; } device_set_desc_copy(dev, "AES-CBC"); /* TODO: Check more fields as we support more features */ return (ret); } static int armv8_crypto_attach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); sc->dieing = 0; sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } rw_init(&sc->lock, "armv8crypto"); ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_vfp[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW); } return (0); } static int armv8_crypto_detach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); rw_wlock(&sc->lock); sc->dieing = 1; rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); CPU_FOREACH(i) { if (ctx_vfp[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_vfp[i]); } ctx_vfp[i] = NULL; } free(ctx_mtx, M_ARMV8_CRYPTO); ctx_mtx = NULL; free(ctx_vfp, M_ARMV8_CRYPTO); ctx_vfp = NULL; return (0); } static int armv8_crypto_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (EINVAL); switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (EINVAL); } break; default: return (EINVAL); } break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void armv8_crypto_cipher_setup(struct armv8_crypto_session *ses, const struct crypto_session_params *csp) { int i; switch (csp->csp_cipher_klen * 8) { case 128: ses->rounds = AES128_ROUNDS; break; case 192: ses->rounds = AES192_ROUNDS; break; case 256: ses->rounds = AES256_ROUNDS; break; default: panic("invalid CBC key length"); } rijndaelKeySetupEnc(ses->enc_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); rijndaelKeySetupDec(ses->dec_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); for (i = 0; i <
nitems(ses->enc_schedule); i++) { ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]); ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]); } } static int armv8_crypto_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct armv8_crypto_softc *sc; struct armv8_crypto_session *ses; sc = device_get_softc(dev); rw_wlock(&sc->lock); if (sc->dieing) { rw_wunlock(&sc->lock); return (EINVAL); } ses = crypto_get_driver_session(cses); armv8_crypto_cipher_setup(ses, csp); rw_wunlock(&sc->lock); return (0); } static int armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused) { struct armv8_crypto_session *ses; int error; /* We can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } ses = crypto_get_driver_session(crp->crp_session); error = armv8_crypto_cipher_process(ses, crp); out: crp->crp_etype = error; crypto_done(crp);
-	return (error);
+	return (0);
} static uint8_t * armv8_crypto_cipher_alloc(struct cryptop *crp, int *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start, crp->crp_payload_length); if (addr != NULL) { *allocated = 0; return (addr); } addr = malloc(crp->crp_payload_length, M_ARMV8_CRYPTO, M_NOWAIT); if (addr != NULL) { *allocated = 1; crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, addr); } else *allocated = 0; return (addr); } static int armv8_crypto_cipher_process(struct armv8_crypto_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; uint8_t *buf; uint8_t iv[AES_BLOCK_LEN]; int allocated, i; int encflag; int kt; csp = crypto_get_params(crp->crp_session); encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); buf = armv8_crypto_cipher_alloc(crp, &allocated); if (buf == NULL) return (ENOMEM); kt = is_fpu_kern_thread(0); if (!kt) { AQUIRE_CTX(i, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if (crp->crp_cipher_key != NULL) { panic("armv8: new cipher key"); } crypto_read_iv(crp, iv); /* Do work */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); else armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, buf, iv); break; } if (allocated) crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, buf); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(i, ctx); } if (allocated) zfree(buf, M_ARMV8_CRYPTO); return (0); } static device_method_t armv8_crypto_methods[] = { DEVMETHOD(device_identify, armv8_crypto_identify), DEVMETHOD(device_probe, armv8_crypto_probe), DEVMETHOD(device_attach, armv8_crypto_attach), DEVMETHOD(device_detach, armv8_crypto_detach), DEVMETHOD(cryptodev_probesession, armv8_crypto_probesession), DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession), DEVMETHOD(cryptodev_process, armv8_crypto_process), DEVMETHOD_END, }; static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods, sizeof(struct armv8_crypto_softc)); static devclass_t armv8_crypto_devclass; DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass, 0, 0);

Index: head/sys/dev/hifn/hifn7751.c
===================================================================
--- head/sys/dev/hifn/hifn7751.c	(revision 365477)
+++ head/sys/dev/hifn/hifn7751.c	(revision 365478)
@@ -1,2743 +1,2743 @@
/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /*- *
SPDX-License-Identifier: BSD-3-Clause * * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * Copyright (c) 2003 Hifn Inc. * * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * Driver for various Hifn encryption processors. 
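 *
 * Editor's aside, not part of this commit: consumers reach engines like
 * this one through the opencrypto framework, for example from userland
 * via cryptodev(4).  A minimal sketch, assuming a FreeBSD system with
 * /dev/crypto (older releases may additionally require the CRIOGET
 * clone ioctl); the key, IV, and payload are arbitrary examples.
 */

#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char key[16] = "0123456789abcdef";	/* exactly 16 bytes */
	unsigned char iv[16] = { 0 };
	unsigned char buf[64] = "example payload";	/* block multiple */
	struct session_op sop;
	struct crypt_op cop;
	int fd;

	if ((fd = open("/dev/crypto", O_RDWR)) < 0)
		err(1, "open");

	memset(&sop, 0, sizeof(sop));
	sop.cipher = CRYPTO_AES_CBC;
	sop.keylen = sizeof(key);
	sop.key = (void *)key;
	if (ioctl(fd, CIOCGSESSION, &sop) < 0)
		err(1, "CIOCGSESSION");

	memset(&cop, 0, sizeof(cop));
	cop.ses = sop.ses;
	cop.op = COP_ENCRYPT;
	cop.len = sizeof(buf);
	cop.src = cop.dst = (void *)buf;	/* encrypt in place */
	cop.iv = (void *)iv;
	if (ioctl(fd, CIOCCRYPT, &cop) < 0)
		err(1, "CIOCCRYPT");

	printf("first ciphertext byte: 0x%02x\n", buf[0]);
	return (0);
}

/*
 * End of aside; the driver source resumes.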
*/ #include "opt_hifn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef HIFN_RNDTEST #include #endif #include #include #ifdef HIFN_VULCANDEV #include #include static struct cdevsw vulcanpk_cdevsw; /* forward declaration */ #endif /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static int hifn_shutdown(device_t); static int hifn_probesession(device_t, const struct crypto_session_params *); static int hifn_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int hifn_process(device_t, struct cryptop *, int); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, hifn_probesession), DEVMETHOD(cryptodev_newsession, hifn_newsession), DEVMETHOD(cryptodev_process, hifn_process), DEVMETHOD_END }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); #ifdef HIFN_RNDTEST MODULE_DEPEND(hifn, rndtest, 1, 1, 1); #endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline u_int32_t 
READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. */ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (BUS_PROBE_DEFAULT); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, RANDOM_PURE_HIFN); } static u_int checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) { if (v > max) { device_printf(dev, "Warning, %s %u out of range, " "using max %u\n", what, v, max); v = max; } else if (v < min) { device_printf(dev, "Warning, %s %u out of range, " "using min %u\n", what, v, min); v = min; } return v; } /* * Select PLL configuration for 795x parts. This is complicated in * that we cannot determine the optimal parameters without user input. * The reference clock is derived from an external clock through a * multiplier. The external clock is either the host bus (i.e. PCI) * or an external clock generator. When using the PCI bus we assume * the clock is either 33 or 66 MHz; for an external source we cannot * tell the speed. 
* * PLL configuration is done with a string: "pci" for PCI bus, or "ext" * for an external source, followed by the frequency. We calculate * the appropriate multiplier and PLL register contents accordingly. * When no configuration is given we default to "ext66" (see below). * If a card is configured for the PCI bus clock but sits in a 33MHz * slot then it will be operating at half speed until the correct * information is provided. * * We use a default setting of "ext66" because according to Mike Ham * of HiFn, almost every board in existence has an external crystal * populated at 66MHz. Using PCI can be a problem on modern motherboards, * because PCI33 can have clocks from 0 to 33MHz, and some have * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL. */ static void hifn_getpllconfig(device_t dev, u_int *pll) { const char *pllspec; u_int freq, mul, fl, fh; u_int32_t pllconfig; char *nxt; if (resource_string_value("hifn", device_get_unit(dev), "pllconfig", &pllspec)) pllspec = "ext66"; fl = 33, fh = 66; pllconfig = 0; if (strncmp(pllspec, "ext", 3) == 0) { pllspec += 3; pllconfig |= HIFN_PLL_REF_SEL; switch (pci_get_device(dev)) { case PCI_PRODUCT_HIFN_7955: case PCI_PRODUCT_HIFN_7956: fl = 20, fh = 100; break; #ifdef notyet case PCI_PRODUCT_HIFN_7954: fl = 20, fh = 66; break; #endif } } else if (strncmp(pllspec, "pci", 3) == 0) pllspec += 3; freq = strtoul(pllspec, &nxt, 10); if (nxt == pllspec) freq = 66; else freq = checkmaxmin(dev, "frequency", freq, fl, fh); /* * Calculate multiplier. We target a Fck of 266 MHz, * allowing only even values, possibly rounded down. * Multipliers > 8 must set the charge pump current. */ mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; if (mul > 8) pllconfig |= HIFN_PLL_IS; *pll = pllconfig; } /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); caddr_t kva; int rseg, rid; char rbase; uint16_t rev; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 and 795x have a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note its identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * The 795x parts support AES. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; /* * Select PLL configuration. This depends on the * bus and board design and must be manually configured * if the default setting is unacceptable. */ hifn_getpllconfig(dev, &sc->sc_pllconfig); } /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver.
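 *
 * Editor's aside, not part of this commit: the pllconfig string above
 * comes from a device hint, e.g. a line like hint.hifn.0.pllconfig="ext66"
 * in /boot/device.hints.  Below is a standalone, runnable recreation of
 * the divisor math in hifn_getpllconfig(); PLL_ND_SHIFT mirrors
 * HIFN_PLL_ND_SHIFT (11), while the PLL_IS bit position is an assumed
 * stand-in, not the real register layout.
 */

#include <stdio.h>

#define PLL_ND_SHIFT	11		/* matches HIFN_PLL_ND_SHIFT */
#define PLL_IS		(1u << 14)	/* stand-in bit for HIFN_PLL_IS */

static unsigned
pll_bits(unsigned freq)
{
	unsigned mul, bits = 0;

	mul = (266 / freq) & ~1u;	/* even multiplier, rounded down */
	if (mul < 2)
		mul = 2;
	else if (mul > 12)
		mul = 12;
	bits |= (mul / 2 - 1) << PLL_ND_SHIFT;
	if (mul > 8)			/* high multipliers need the */
		bits |= PLL_IS;		/* charge pump current bit */
	return (bits);
}

int
main(void)
{
	unsigned f[] = { 20, 33, 66, 100 };

	for (int i = 0; i < 4; i++)
		printf("ref %3u MHz -> multiplier %u\n", f[i],
		    2 + 2 * ((pll_bits(f[i]) >> PLL_ND_SHIFT) & 0x7));
	return (0);
}

/*
 * End of aside; hifn_attach() continues.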
*/ pci_enable_busmaster(dev); rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. */ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); /* XXX can't dynamically determine ram type for 795x; force dram */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_drammodel = 1; else if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. 
*/ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 'd' : 's'); if (sc->sc_flags & HIFN_IS_7956) printf(", pll=0x%x<%s clk, %ux mult>", sc->sc_pllconfig, sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci", 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); printf("\n"); WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: case HIFN_PUSTAT_ENA_1: sc->sc_cid = crypto_get_driverid(dev, sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); callout_init(&sc->sc_tickto, 1); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); /* disable interrupts */ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); #ifdef HIFN_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? 
*/ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet hifn_stop(sc); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; #ifdef HIFN_RNDTEST sc->sc_rndtest = rndtest_attach(sc->sc_dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); #ifdef HIFN_VULCANDEV sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "vulcanpk"); sc->sc_pkdev->si_drv1 = sc; #endif } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { /* ONLY VALID ON 7811!!!! */ for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. 
num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num)); } } else { num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num[0])); } callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); #undef RANDOM_BITS } static void hifn_puc_wait(struct hifn_softc *sc) { int i; int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } for (i = 5000; i > 0; i--) { DELAY(1); if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) break; } if (!i) device_printf(sc->sc_dev, "proc unit did not reset\n"); } /* * Reset the processing unit. */ static void hifn_reset_puc(struct hifn_softc *sc) { /* Reset processing unit */ int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); hifn_puc_wait(sc); } /* * Set the Retry and TRDY registers; note that we set them to * zero because the 7811 locks up when forced to retry (section * 3.6 of "Specification Update SU-0014-04"). Not clear if we * should do this for all Hifn parts, but it doesn't seem to hurt. */ static void hifn_set_retry(struct hifn_softc *sc) { /* NB: RETRY only responds to 8-bit reads/writes */ pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1); } /* * Resets the board. Values in the registers are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void hifn_reset_board(struct hifn_softc *sc, int full) { u_int32_t reg; /* * Set polling in the DMA configuration register to zero. The value * 0x7 (MSTRESET | DMARESET | MODE) avoids resetting the board and * zeros out the other fields. */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms * before resetting the board.
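 *
 * Editor's aside, not part of this commit: hifn_puc_wait() above is the
 * usual bounded-poll idiom.  A standalone model, with a plain variable
 * standing in for the device register and 0x1 standing in for
 * HIFN_PUCTRL_RESET:
 */

#include <stdint.h>
#include <stdio.h>

/* Spin until (reg & bit) clears, or give up after "spins" attempts. */
static int
poll_clear(volatile uint32_t *reg, uint32_t bit, int spins)
{
	while (spins-- > 0) {
		if ((*reg & bit) == 0)
			return (0);	/* cleared: the unit left reset */
		/* the driver does DELAY(1) between reads */
	}
	return (-1);			/* timed out: "did not reset" */
}

int
main(void)
{
	volatile uint32_t fake_puctrl = 0;	/* reset bit already clear */

	printf("poll result: %d\n", poll_clear(&fake_puctrl, 0x1, 5000));
	return (0);
}

/*
 * End of aside; the reset sequence continues.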
*/ DELAY(1000); /* Reset the DMA unit */ if (full) { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); DELAY(1000); } else { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); hifn_reset_puc(sc); } KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); bzero(sc->sc_dma, sizeof(*sc->sc_dma)); /* Bring dma unit out of reset */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); hifn_puc_wait(sc); hifn_set_retry(sc); if (sc->sc_flags & HIFN_IS_7811) { for (reg = 0; reg < 1000; reg++) { if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & HIFN_MIPSRST_CRAMINIT) break; DELAY(1000); } if (reg == 1000) printf(": cram init timeout\n"); } else { /* set up DMA configuration register #2 */ /* turn off all PK and BAR0 swaps */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); } } static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt) { int i; u_int32_t v; for (i = 0; i < cnt; i++) { /* get the parity */ v = a & 0x80080125; v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v ^= v >> 2; v ^= v >> 1; a = (v & 1) ^ (a << 1); } return a; } struct pci2id { u_short pci_vendor; u_short pci_prod; char card_id[13]; }; static struct pci2id pci2id[] = { { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { /* * Other vendors share this PCI ID as well, such as * http://www.powercrypt.com, and obviously they also * use the same key. */ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, }; /* * Checks to see if crypto is already enabled. If crypto isn't enabled, * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ static int hifn_enable_crypto(struct hifn_softc *sc) { u_int32_t dmacfg, ramcfg, encl, addr, i; char *offtbl = NULL; for (i = 0; i < nitems(pci2id); i++) { if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { offtbl = pci2id[i].card_id; break; } } if (offtbl == NULL) { device_printf(sc->sc_dev, "Unknown card!\n"); return (1); } ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; /* * Make sure we don't re-unlock. Two unlocks kill the chip until the * next reboot.
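 *
 * Editor's aside, not part of this commit: a standalone, runnable
 * recreation of the unlock-signature chain.  next_signature() repeats
 * hifn_next_signature() above; the starting value is an arbitrary
 * stand-in for the HIFN_UNLOCK_SECRET1 read, and the all-zero offset
 * table matches the card_id entries in pci2id[].
 */

#include <stdint.h>
#include <stdio.h>

/* One parity-feedback shift per count, exactly as in the driver. */
static uint32_t
next_signature(uint32_t a, unsigned cnt)
{
	uint32_t v;

	while (cnt-- > 0) {
		v = a & 0x80080125;	/* select the tap bits */
		v ^= v >> 16;		/* fold to a single parity bit */
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;
		a = (v & 1) ^ (a << 1);	/* shift, feeding the parity back */
	}
	return (a);
}

int
main(void)
{
	uint8_t offtbl[13] = { 0 };	/* all-zero card_id */
	uint32_t addr = 0xdeadbeef;	/* stand-in secret */

	for (int i = 0; i <= 12; i++) {
		addr = next_signature(addr, offtbl[i] + 0x101);
		printf("round %2d: write 0x%08x\n", i, (unsigned)addr);
	}
	return (0);
}

/*
 * End of aside; hifn_enable_crypto() continues.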
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. */ static void hifn_init_pci_registers(struct hifn_softc *sc) { /* write fixed values needed by the Initialization registers */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); DELAY(2000); /* write status register */ WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | HIFN_DMACSR_S_WAIT | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | HIFN_DMACSR_C_WAIT | HIFN_DMACSR_ENGINE | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? HIFN_DMACSR_PUBDONE : 0) | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | ((sc->sc_flags & HIFN_IS_7811) ? 
HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); if (sc->sc_flags & HIFN_IS_7956) { u_int32_t pll; WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); /* turn off the clocks and insure bypass is set */ pll = READ_REG_1(sc, HIFN_1_PLL); pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL)) | HIFN_PLL_BP | HIFN_PLL_MBSET; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* change configuration */ pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* disable bypass */ pll &= ~HIFN_PLL_BP; WRITE_REG_1(sc, HIFN_1_PLL, pll); /* enable clocks with new configuration */ pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL; WRITE_REG_1(sc, HIFN_1_PLL, pll); } else { WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); } WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* * The maximum number of sessions supported by the card * is dependent on the amount of context ram, which * encryption algorithms are enabled, and how compression * is configured. This should be configured before this * routine is called. */ static void hifn_sessions(struct hifn_softc *sc) { u_int32_t pucnfg; int ctxsize; pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); if (pucnfg & HIFN_PUCNFG_COMPSING) { if (pucnfg & HIFN_PUCNFG_ENCCNFG) ctxsize = 128; else ctxsize = 512; /* * 7955/7956 has internal context memory of 32K */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_maxses = 32768 / ctxsize; else sc->sc_maxses = 1 + ((sc->sc_ramsize - 32768) / ctxsize); } else sc->sc_maxses = sc->sc_ramsize / 16384; if (sc->sc_maxses > 2048) sc->sc_maxses = 2048; } /* * Determine ram type (sram or dram). Board should be just out of a reset * state when this is called. 
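 *
 * Editor's aside, not part of this commit: the session-count arithmetic
 * in hifn_sessions() above, restated as a runnable example.  This
 * assumes HIFN_PUCNFG_COMPSING is set (single compression context); the
 * example ram sizes are arbitrary.
 */

#include <stdio.h>

static int
max_sessions(int is7956, int enccnfg, unsigned ramsize)
{
	int ctxsize, maxses;

	ctxsize = enccnfg ? 128 : 512;		/* context bytes per session */
	if (is7956)
		maxses = 32768 / ctxsize;	/* fixed 32K internal ram */
	else
		maxses = 1 + ((ramsize - 32768) / ctxsize);
	if (maxses > 2048)
		maxses = 2048;			/* hard cap, as above */
	return (maxses);
}

int
main(void)
{
	printf("7956 internal ram:  %d sessions\n", max_sessions(1, 0, 0));
	printf("7751 w/ 512K sram:  %d sessions\n",
	    max_sessions(0, 0, 512 * 1024));
	return (0);
}

/*
 * End of aside; hifn_ramtype() follows.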
*/ static int hifn_ramtype(struct hifn_softc *sc) { u_int8_t data[8], dataexpect[8]; int i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0x55; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0xaa; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } return (0); } #define HIFN_SRAM_MAX (32 << 20) #define HIFN_SRAM_STEP_SIZE 16384 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) static int hifn_sramsize(struct hifn_softc *sc) { u_int32_t a; u_int8_t data[8]; u_int8_t dataexpect[sizeof(data)]; int32_t i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = i ^ 0x5a; for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, data, sizeof(i)); hifn_writeramaddr(sc, a, data); } for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, dataexpect, sizeof(i)); if (hifn_readramaddr(sc, a, data) < 0) return (0); if (bcmp(data, dataexpect, sizeof(data)) != 0) return (0); sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; } return (0); } /* * XXX For dram boards, one should really try all of the * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG * is already set up correctly. */ static int hifn_dramsize(struct hifn_softc *sc) { u_int32_t cnfg; if (sc->sc_flags & HIFN_IS_7956) { /* * 7955/7956 have a fixed internal ram of only 32K. */ sc->sc_ramsize = 32768; } else { cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & HIFN_PUCNFG_DRAMMASK; sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); } return (0); } static void hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) { struct hifn_dma *dma = sc->sc_dma; if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *cmdp = sc->sc_cmdi++; sc->sc_cmdk = sc->sc_cmdi; if (sc->sc_srci == HIFN_D_SRC_RSIZE) { sc->sc_srci = 0; dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *srcp = sc->sc_srci++; sc->sc_srck = sc->sc_srci; if (sc->sc_dsti == HIFN_D_DST_RSIZE) { sc->sc_dsti = 0; dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *dstp = sc->sc_dsti++; sc->sc_dstk = sc->sc_dsti; if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *resp = sc->sc_resi++; sc->sc_resk = sc->sc_resi; } static int hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t wc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, resi, srci, dsti; wc.masks = htole16(3 << 13); wc.session_num = htole16(addr >> 14); wc.total_source_count = htole16(8); wc.total_dest_count = htole16(addr & 0x3fff); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, 
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); /* build write command */ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; bcopy(data, &dma->test_src, sizeof(dma->test_src)); dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->cmdr[cmdi].l = htole32(16 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(4 | masks); dma->resr[resi].l = htole32(4 | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "writeramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; return (-1); } else r = 0; WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } static int hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t rc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, srci, dsti, resi; rc.masks = htole16(2 << 13); rc.session_num = htole16(addr >> 14); rc.total_source_count = htole16(addr & 0x3fff); rc.total_dest_count = htole16(8); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->test_src = 0; dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. 
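 *
 * Editor's aside, not part of this commit: each ring here (cmdr, srcr,
 * dstr, resr) has RSIZE data slots plus one permanent descriptor that
 * jumps back to slot 0, which is what hifn_alloc_slot() and the
 * *wrap() helpers re-arm.  A tiny standalone model of that wrap logic;
 * the flag values are stand-ins for the HIFN_D_* bits:
 */

#include <stdio.h>

#define RSIZE	4			/* tiny ring for illustration */
#define D_VALID	0x80000000u
#define D_JUMP	0x40000000u

struct desc { unsigned l; };

static struct desc ring[RSIZE + 1];	/* extra slot is the jump descriptor */
static int idx;

/* Claim the next slot, re-arming the jump when the index wraps. */
static int
alloc_slot(void)
{
	if (idx == RSIZE) {
		ring[RSIZE].l = D_VALID | D_JUMP;	/* chip jumps to 0 */
		idx = 0;
	}
	return (idx++);
}

int
main(void)
{
	for (int i = 0; i < 10; i++)
		printf("op %d -> slot %d\n", i, alloc_slot());
	return (0);
}

/*
 * End of aside; hifn_init_dma() follows.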
*/ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0; sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0; sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. */ static u_int hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) { struct cryptop *crp; u_int8_t *buf_pos; hifn_base_command_t *base_cmd; hifn_mac_command_t *mac_cmd; hifn_crypt_command_t *cry_cmd; int using_mac, using_crypt, ivlen; u_int32_t dlen, slen; crp = cmd->crp; buf_pos = buf; using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; base_cmd = (hifn_base_command_t *)buf_pos; base_cmd->masks = htole16(cmd->base_masks); slen = cmd->src_mapsize; if (cmd->sloplen) dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); else dlen = cmd->dst_mapsize; base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); dlen >>= 16; slen >>= 16; base_cmd->session_num = htole16( ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); buf_pos += sizeof(hifn_base_command_t); if (using_mac) { mac_cmd = (hifn_mac_command_t *)buf_pos; dlen = crp->crp_aad_length + crp->crp_payload_length; mac_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; mac_cmd->masks = htole16(cmd->mac_masks | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); if (crp->crp_aad_length != 0) mac_cmd->header_skip = htole16(crp->crp_aad_start); else mac_cmd->header_skip = htole16(crp->crp_payload_start); mac_cmd->reserved = 0; buf_pos += sizeof(hifn_mac_command_t); } if (using_crypt) { cry_cmd = (hifn_crypt_command_t *)buf_pos; dlen = crp->crp_payload_length; cry_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; cry_cmd->masks = htole16(cmd->cry_masks | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); cry_cmd->header_skip = htole16(crp->crp_payload_length); cry_cmd->reserved = 0; buf_pos += sizeof(hifn_crypt_command_t); } if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); buf_pos += HIFN_MAC_KEY_LENGTH; } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_AES: /* * AES keys are variable 128, 192 and * 256 bits (16, 24 and 32 bytes). 
*/ bcopy(cmd->ck, buf_pos, cmd->cklen); buf_pos += cmd->cklen; break; } } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_AES: ivlen = HIFN_AES_IV_LENGTH; break; default: ivlen = HIFN_IV_LENGTH; break; } bcopy(cmd->iv, buf_pos, ivlen); buf_pos += ivlen; } if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { bzero(buf_pos, 8); buf_pos += 8; } return (buf_pos - buf); } static int hifn_dmamap_aligned(struct hifn_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static __inline int hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } return (idx); } static int hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *dst = &cmd->dst; u_int32_t p, l; int idx, used = 0, i; idx = sc->sc_dsti; for (i = 0; i < dst->nsegs - 1; i++) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } if (cmd->sloplen == 0) { p = dst->segs[i].ds_addr; l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | dst->segs[i].ds_len; } else { p = sc->sc_dma_physaddr + offsetof(struct hifn_dma, slop[cmd->slopidx]); l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | sizeof(u_int32_t); if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | (dst->segs[i].ds_len - cmd->sloplen)); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } } dma->dstr[idx].p = htole32(p); dma->dstr[idx].l = htole32(l); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); sc->sc_dsti = idx; sc->sc_dstu += used; return (idx); } static __inline int hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = 0; } return (idx); } static int hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *src = &cmd->src; int idx, i; u_int32_t last = 0; idx = sc->sc_srci; for (i = 0; i < src->nsegs; i++) { if (i == src->nsegs - 1) last = HIFN_D_LAST; dma->srcr[idx].p = htole32(src->segs[i].ds_addr); dma->srcr[idx].l = htole32(src->segs[i].ds_len | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); HIFN_SRCR_SYNC(sc, idx, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = hifn_dmamap_srcwrap(sc, idx); } sc->sc_srci = idx; sc->sc_srcu += src->nsegs; return (idx); } static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error) { struct hifn_operand *op = arg; KASSERT(nsegs <= MAX_SCATTER, ("hifn_op_cb: too many DMA segments (%u > %u) " "returned when mapping operand", nsegs, MAX_SCATTER)); op->nsegs = nsegs; bcopy(seg, 
op->segs, nsegs * sizeof (seg[0])); } static int hifn_crypto( struct hifn_softc *sc, struct hifn_command *cmd, struct cryptop *crp, int hint) { struct hifn_dma *dma = sc->sc_dma; u_int32_t cmdlen, csr; int cmdi, resi, err = 0; /* * need 1 cmd, and 1 res * * NB: check this first since it's easy. */ HIFN_LOCK(sc); if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE || (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "cmd/result exhaustion, cmdu %u resu %u\n", sc->sc_cmdu, sc->sc_resu); } #endif hifnstats.hst_nomem_cr++; HIFN_UNLOCK(sc); return (ERESTART); } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { hifnstats.hst_nomem_map++; HIFN_UNLOCK(sc); return (ENOMEM); } if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } cmd->src_mapsize = crypto_buffer_len(&crp->crp_buf); if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; KASSERT(cmd->dst_m == NULL, ("hifn_crypto: dst_m initialized improperly")); hifnstats.hst_unaligned++; /* * Source is not aligned on a longword boundary. * Copy the data to insure alignment. If we fail * to allocate mbufs or clusters while doing this * we return ERESTART so the operation is requeued * at the crypto later, but only if there are * ops already posted to the hardware; otherwise we * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 && !m_dup_pkthdr(m0, crp->crp_buf.cb_mbuf, M_NOWAIT)) { m_free(m0); m0 = NULL; } } else { len = MLEN; MGET(m0, M_NOWAIT, MT_DATA); } if (m0 == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; goto err_srcmap; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m0, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } totlen -= len; m0->m_pkthdr.len = m0->m_len = len; mlast = m0; while (totlen > 0) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MLEN; if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? 
ERESTART : ENOMEM; mlast->m_next = m; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } m->m_len = len; m0->m_pkthdr.len += len; totlen -= len; mlast->m_next = m; mlast = m; } cmd->dst_m = m0; if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_srcmap; } if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0, cmd->dst_segs, &cmd->dst_nsegs, 0)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_dstmap1; } cmd->dst_mapsize = m0->m_pkthdr.len; } else { err = EINVAL; goto err_srcmap; } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu, cmd->src_nsegs, cmd->dst_nsegs); } #endif if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_PREREAD); } /* * need N src, and N dst */ if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", sc->sc_srcu, cmd->src_nsegs, sc->sc_dstu, cmd->dst_nsegs); } #endif hifnstats.hst_nomem_sd++; err = ERESTART; goto err_dstmap; } if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } cmdi = sc->sc_cmdi++; cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); /* .p for command/result already set */ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->sc_cmdu++; /* * We don't worry about missing an interrupt (which a "command wait" * interrupt salvages us from), unless there is more than one command * in the queue. */ if (sc->sc_cmdu > 1) { sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } hifnstats.hst_ipackets++; hifnstats.hst_ibytes += cmd->src_mapsize; hifn_dmamap_load_src(sc, cmd); /* * Unlike other descriptors, we don't mask done interrupt from * result descriptor. 
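* (Result completion is what finishes a request, so the result
* descriptor's done interrupt normally stays unmasked; the batching
* path below sets HIFN_D_MASKDONEIRQ on it only when the caller
* hints that more work is coming via CRYPTO_HINT_MORE.)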
*/ #ifdef HIFN_DEBUG if (hifn_debug) printf("load res\n"); #endif if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } resi = sc->sc_resi++; KASSERT(sc->sc_hifn_commands[resi] == NULL, ("hifn_crypto: command slot %u busy", resi)); sc->sc_hifn_commands[resi] = cmd; HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); sc->sc_curbatch++; if (sc->sc_curbatch > hifnstats.hst_maxbatch) hifnstats.hst_maxbatch = sc->sc_curbatch; hifnstats.hst_totbatch++; } else { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST); sc->sc_curbatch = 0; } HIFN_RESR_SYNC(sc, resi, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_resu++; if (cmd->sloplen) cmd->slopidx = resi; hifn_dmamap_load_dst(sc, cmd); csr = 0; if (sc->sc_c_busy == 0) { csr |= HIFN_DMACSR_C_CTRL_ENA; sc->sc_c_busy = 1; } if (sc->sc_s_busy == 0) { csr |= HIFN_DMACSR_S_CTRL_ENA; sc->sc_s_busy = 1; } if (sc->sc_r_busy == 0) { csr |= HIFN_DMACSR_R_CTRL_ENA; sc->sc_r_busy = 1; } if (sc->sc_d_busy == 0) { csr |= HIFN_DMACSR_D_CTRL_ENA; sc->sc_d_busy = 1; } if (csr) WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); } #endif sc->sc_active = 5; HIFN_UNLOCK(sc); KASSERT(err == 0, ("hifn_crypto: success with error %u", err)); return (err); /* success */ err_dstmap: if (cmd->src_map != cmd->dst_map) bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); err_dstmap1: if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) m_freem(cmd->dst_m); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); err_srcmap1: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); HIFN_UNLOCK(sc); return (err); } static void hifn_tick(void* vsc) { struct hifn_softc *sc = vsc; HIFN_LOCK(sc); if (sc->sc_active == 0) { u_int32_t r = 0; if (sc->sc_cmdu == 0 && sc->sc_c_busy) { sc->sc_c_busy = 0; r |= HIFN_DMACSR_C_CTRL_DIS; } if (sc->sc_srcu == 0 && sc->sc_s_busy) { sc->sc_s_busy = 0; r |= HIFN_DMACSR_S_CTRL_DIS; } if (sc->sc_dstu == 0 && sc->sc_d_busy) { sc->sc_d_busy = 0; r |= HIFN_DMACSR_D_CTRL_DIS; } if (sc->sc_resu == 0 && sc->sc_r_busy) { sc->sc_r_busy = 0; r |= HIFN_DMACSR_R_CTRL_DIS; } if (r) WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); } else sc->sc_active--; HIFN_UNLOCK(sc); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); } static void hifn_intr(void *arg) { struct hifn_softc *sc = arg; struct hifn_dma *dma; u_int32_t dmacsr, restart; int i, u; dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); /* Nothing in the DMA unit interrupted */ if ((dmacsr & sc->sc_dmaier) == 0) return; HIFN_LOCK(sc); dma = sc->sc_dma; #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi, sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); } #endif WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); if ((sc->sc_flags & HIFN_HAS_PUBLIC) && (dmacsr & HIFN_DMACSR_PUBDONE))
WRITE_REG_1(sc, HIFN_1_PUB_STATUS, READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); if (restart) device_printf(sc->sc_dev, "overrun %x\n", dmacsr); if (sc->sc_flags & HIFN_IS_7811) { if (dmacsr & HIFN_DMACSR_ILLR) device_printf(sc->sc_dev, "illegal read\n"); if (dmacsr & HIFN_DMACSR_ILLW) device_printf(sc->sc_dev, "illegal write\n"); } restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { device_printf(sc->sc_dev, "abort, resetting.\n"); hifnstats.hst_abort++; hifn_abort(sc); HIFN_UNLOCK(sc); return; } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) { /* * If no slots to process and we receive a "waiting on * command" interrupt, we disable the "waiting on command" * (by clearing it). */ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } /* clear the rings */ i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->resr[i].l & htole32(HIFN_D_VALID)) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_RES_RSIZE) { struct hifn_command *cmd; u_int8_t *macbuf = NULL; HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_intr: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } hifn_callback(sc, cmd, macbuf); hifnstats.hst_opackets++; u--; } if (++i == (HIFN_D_RES_RSIZE + 1)) i = 0; } sc->sc_resk = i; sc->sc_resu = u; i = sc->sc_srck; u = sc->sc_srcu; while (u != 0) { if (i == HIFN_D_SRC_RSIZE) i = 0; HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_srck = i; sc->sc_srcu = u; i = sc->sc_cmdk; u = sc->sc_cmdu; while (u != 0) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_CMD_RSIZE) { u--; HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); } if (++i == (HIFN_D_CMD_RSIZE + 1)) i = 0; } sc->sc_cmdk = i; sc->sc_cmdu = u; HIFN_UNLOCK(sc); if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "wakeup crypto (%x) u %d/%d/%d/%d\n", sc->sc_needwakeup, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); #endif sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } static bool hifn_auth_supported(struct hifn_softc *sc, const struct crypto_session_params *csp) { switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: case HIFN_PUSTAT_ENA_1: break; default: return (false); } switch (csp->csp_auth_alg) { case CRYPTO_SHA1: break; case CRYPTO_SHA1_HMAC: if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH) return (false); break; default: return (false); } return (true); } static bool hifn_cipher_supported(struct hifn_softc *sc, const struct crypto_session_params *csp) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH) return (false); switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if ((sc->sc_flags & HIFN_HAS_AES) == 0) return (false); switch 
(csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (false); } return (true); } } return (false); } static int hifn_probesession(device_t dev, const struct crypto_session_params *csp) { struct hifn_softc *sc; sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!hifn_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!hifn_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!hifn_auth_supported(sc, csp) || !hifn_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Allocate a new 'session'. */ static int hifn_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct hifn_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_auth_alg != 0) { if (csp->csp_auth_mlen == 0) ses->hs_mlen = crypto_auth_hash(csp)->hashsize; else ses->hs_mlen = csp->csp_auth_mlen; } return (0); } /* * XXX freesession routine should run a zero'd mac/encrypt key into context * ram to blow away any keys already stored there. */ static int hifn_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct hifn_softc *sc = device_get_softc(dev); struct hifn_command *cmd = NULL; const void *mackey; int err, keylen; struct hifn_session *ses; ses = crypto_get_driver_session(crp->crp_session); cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } csp = crypto_get_params(crp->crp_session); /* * The driver only supports ETA requests where there is no * gap between the AAD and payload. */ if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 && crp->crp_aad_start + crp->crp_aad_length != crp->crp_payload_start) { err = EINVAL; goto errout; } switch (csp->csp_mode) { case CSP_MODE_CIPHER: case CSP_MODE_ETA: if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) cmd->base_masks |= HIFN_BASE_CMD_DECODE; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } crypto_read_iv(crp, cmd->iv); if (crp->crp_cipher_key != NULL) cmd->ck = crp->crp_cipher_key; else cmd->ck = csp->csp_cipher_key; cmd->cklen = csp->csp_cipher_klen; cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; /* * Need to specify the size for the AES key in the masks.
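* (cklen counts bytes: 16, 24 and 32 select HIFN_CRYPT_CMD_KSZ_128,
* _192 and _256 respectively in the switch below.)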
*/ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == HIFN_CRYPT_CMD_ALG_AES) { switch (cmd->cklen) { case 16: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; break; case 24: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; break; case 32: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; break; default: err = EINVAL; goto errout; } } break; } switch (csp->csp_mode) { case CSP_MODE_DIGEST: case CSP_MODE_ETA: cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (csp->csp_auth_alg) { case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; if (crp->crp_auth_key != NULL) mackey = crp->crp_auth_key; else mackey = csp->csp_auth_key; keylen = csp->csp_auth_klen; bcopy(mackey, cmd->mac, keylen); bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen); } } cmd->crp = crp; cmd->session = ses; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. */ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); - return (err); + return (0); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. */ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->dst_m != NULL) { m_freem(cmd->dst_m); } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. 
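* (An etype of EAGAIN would let the crypto layer resubmit the
* request once the board reset below completes; note the code
* further down skips crypto_done() for EAGAIN. ENOMEM instead
* reports a hard failure to the caller.)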
*/ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } sc->sc_resk = i; sc->sc_resu = u; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; uint8_t macbuf2[SHA1_HASH_LEN]; struct mbuf *m; int totlen, i, u; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) { totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = crp->crp_buf.cb_mbuf->m_pkthdr.len; m_freem(crp->crp_buf.cb_mbuf); crp->crp_buf.cb_mbuf = cmd->dst_m; } } if (cmd->sloplen != 0) { crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, &dma->slop[cmd->slopidx]); } i = sc->sc_dstk; u = sc->sc_dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_dstk = i; sc->sc_dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if (macbuf != NULL) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, cmd->session->hs_mlen, macbuf2); if (timingsafe_bcmp(macbuf, macbuf2, cmd->session->hs_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, cmd->session->hs_mlen, macbuf); } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. last) * register may not always work. */ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } #ifdef HIFN_VULCANDEV /* * this code provides support for mapping the PK engine's register * into a userspace program. 
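* (vulcanpk_mmap() below accepts only offset 0 and returns the
* physical start of BAR1, letting a user process drive the
* public-key unit registers directly.)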
* */ static int vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct hifn_softc *sc; vm_paddr_t pd; void *b; sc = dev->si_drv1; pd = rman_get_start(sc->sc_bar1res); b = rman_get_virtual(sc->sc_bar1res); #if 0 printf("vpk mmap: %p(%016llx) offset=%lld\n", b, (unsigned long long)pd, offset); hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0); #endif if (offset == 0) { *paddr = pd; return (0); } return (-1); } static struct cdevsw vulcanpk_cdevsw = { .d_version = D_VERSION, .d_mmap = vulcanpk_mmap, .d_name = "vulcanpk", }; #endif /* HIFN_VULCANDEV */ Index: head/sys/dev/safe/safe.c =================================================================== --- head/sys/dev/safe/safe.c (revision 365477) +++ head/sys/dev/safe/safe.c (revision 365478) @@ -1,1980 +1,1981 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * SafeNet SafeXcel-1141 hardware crypto accelerator */ #include "opt_safe.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef SAFE_RNDTEST #include #endif #include #include #ifndef bswap32 #define bswap32 NTOHL #endif /* * Prototypes and count for the pci_device structure */ static int safe_probe(device_t); static int safe_attach(device_t); static int safe_detach(device_t); static int safe_suspend(device_t); static int safe_resume(device_t); static int safe_shutdown(device_t); static int safe_probesession(device_t, const struct crypto_session_params *); static int safe_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int safe_process(device_t, struct cryptop *, int); static device_method_t safe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, safe_probe), DEVMETHOD(device_attach, safe_attach), DEVMETHOD(device_detach, safe_detach), DEVMETHOD(device_suspend, safe_suspend), DEVMETHOD(device_resume, safe_resume), DEVMETHOD(device_shutdown, safe_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, safe_probesession), DEVMETHOD(cryptodev_newsession, safe_newsession), DEVMETHOD(cryptodev_process, safe_process), DEVMETHOD_END }; static driver_t safe_driver = { "safe", safe_methods, sizeof (struct safe_softc) }; static devclass_t safe_devclass; DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); MODULE_DEPEND(safe, crypto, 1, 1, 1); #ifdef SAFE_RNDTEST MODULE_DEPEND(safe, rndtest, 1, 1, 1); #endif static void safe_intr(void *); static void safe_callback(struct safe_softc *, struct safe_ringentry *); static void safe_feed(struct safe_softc *, struct safe_ringentry *); static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); #ifndef SAFE_NO_RNG static void safe_rng_init(struct safe_softc *); static void safe_rng(void *); #endif /* SAFE_NO_RNG */ static int safe_dma_malloc(struct safe_softc *, bus_size_t, struct safe_dma_alloc *, int); #define safe_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); static int safe_dmamap_aligned(const struct safe_operand *); static int safe_dmamap_uniform(const struct safe_operand *); static void safe_reset_board(struct safe_softc *); static void safe_init_board(struct safe_softc *); static void safe_init_pciregs(device_t dev); static void safe_cleanchip(struct safe_softc *); static void safe_totalreset(struct safe_softc *); static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "SafeNet driver parameters"); #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *, const char *); static void safe_dump_ringstate(struct safe_softc *, const char *); static void safe_dump_intrstate(struct safe_softc *, const char *); static void safe_dump_request(struct safe_softc *, const char *, struct safe_ringentry *); static struct safe_softc *safec; /* for use by hw.safe.dump */ static int safe_debug = 0; SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 0, "control debugging msgs"); #define DPRINTF(_x) if (safe_debug) printf _x #else #define DPRINTF(_x) #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define 
WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) struct safe_stats safestats; SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, safe_stats, "driver statistics"); #ifndef SAFE_NO_RNG static int safe_rnginterval = 1; /* poll once a second */ SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 0, "RNG polling interval (secs)"); static int safe_rngbufsize = 16; /* 64 bytes each poll */ SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 0, "RNG polling buffer size (32-bit words)"); static int safe_rngmaxalarm = 8; /* max alarms before reset */ SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 0, "RNG max alarms before reset"); #endif /* SAFE_NO_RNG */ static int safe_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* safe_partname(struct safe_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_SAFENET: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; } return "SafeNet unknown-part"; } return "Unknown-vendor unknown-part"; } #ifndef SAFE_NO_RNG static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, RANDOM_PURE_SAFE); } #endif /* SAFE_NO_RNG */ static int safe_attach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); u_int32_t raddr; u_int32_t i; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; /* XXX handle power management */ pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, safe_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); /* * Setup DMA descriptor area. 
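* (Two tags follow: sc_srcdmat, bounded by SAFE_DMA_BOUNDARY, and
* sc_dstdmat, whose boundary and maximum segment size are both
* SAFE_MAX_DSIZE so each destination segment maps onto one
* fixed-size particle.)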
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_SSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_srcdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_MAX_DSIZE, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_DSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_dstdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } /* * Allocate packet engine descriptors. */ if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), &sc->sc_ringalloc, 0)) { device_printf(dev, "cannot allocate PE descriptor ring\n"); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } /* * Hookup the static portion of all our data structures. */ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; sc->sc_front = sc->sc_ring; sc->sc_back = sc->sc_ring; raddr = sc->sc_ringalloc.dma_paddr; bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); for (i = 0; i < SAFE_MAX_NQUEUE; i++) { struct safe_ringentry *re = &sc->sc_ring[i]; re->re_desc.d_sa = raddr + offsetof(struct safe_ringentry, re_sa); re->re_sa.sa_staterec = raddr + offsetof(struct safe_ringentry, re_sastate); raddr += sizeof (struct safe_ringentry); } mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), "packet engine ring", MTX_DEF); /* * Allocate scatter and gather particle descriptors. 
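* (sc_spring holds SAFE_TOTAL_SPART gather descriptors for source
* operands; sc_dpring holds SAFE_TOTAL_DPART scatter descriptors
* for destinations. safe_process() chains multi-segment operands
* through these rings.)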
*/ if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), &sc->sc_spalloc, 0)) { device_printf(dev, "cannot allocate source particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; sc->sc_spfree = sc->sc_spring; bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), &sc->sc_dpalloc, 0)) { device_printf(dev, "cannot allocate destination particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_spalloc); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_dstdmat); goto bad4; } sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; sc->sc_dpfree = sc->sc_dpring; bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); device_printf(sc->sc_dev, "%s", safe_partname(sc)); sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO); if (sc->sc_devinfo & SAFE_DEVINFO_RNG) { sc->sc_flags |= SAFE_FLAGS_RNG; printf(" rng"); } if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) { #if 0 printf(" key"); sc->sc_flags |= SAFE_FLAGS_KEY; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } if (sc->sc_devinfo & SAFE_DEVINFO_DES) { printf(" des/3des"); } if (sc->sc_devinfo & SAFE_DEVINFO_AES) { printf(" aes"); } if (sc->sc_devinfo & SAFE_DEVINFO_MD5) { printf(" md5"); } if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) { printf(" sha1"); } /* XXX other supported algorithms */ printf("\n"); safe_reset_board(sc); /* reset h/w */ safe_init_pciregs(dev); /* init pci settings */ safe_init_board(sc); /* init h/w */ #ifndef SAFE_NO_RNG if (sc->sc_flags & SAFE_FLAGS_RNG) { #ifdef SAFE_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif safe_rng_init(sc); callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); } #endif /* SAFE_NO_RNG */ #ifdef SAFE_DEBUG safec = sc; /* for use by hw.safe.dump */ #endif return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. */ static int safe_detach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */ callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); #ifdef SAFE_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif safe_cleanchip(sc); safe_dma_free(sc, &sc->sc_dpalloc); safe_dma_free(sc, &sc->sc_spalloc); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_srcdmat); bus_dma_tag_destroy(sc->sc_dstdmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
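* (The actual stop is still under "#ifdef notyet" below, so the
* shutdown hook is a placeholder for now.)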
*/ static int safe_shutdown(device_t dev) { #ifdef notyet safe_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int safe_suspend(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int safe_resume(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX restore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * SafeXcel Interrupt routine */ static void safe_intr(void *arg) { struct safe_softc *sc = arg; volatile u_int32_t stat; stat = READ_REG(sc, SAFE_HM_STAT); if (stat == 0) /* shared irq, not for us */ return; WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */ if ((stat & SAFE_INT_PE_DDONE)) { /* * Descriptor(s) done; scan the ring and * process completed operations. */ mtx_lock(&sc->sc_ringmtx); while (sc->sc_back != sc->sc_front) { struct safe_ringentry *re = sc->sc_back; #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif /* * safe_process marks ring entries that were allocated * but not used with a csr of zero. This insures the * ring front pointer never needs to be set backwards * in the event that an entry is allocated but not used * because of a setup error. */ if (re->re_desc.d_csr != 0) { if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) break; if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) break; sc->sc_nqchip--; safe_callback(sc, re); } if (++(sc->sc_back) == sc->sc_ringtop) sc->sc_back = sc->sc_ring; } mtx_unlock(&sc->sc_ringmtx); } /* * Check to see if we got any DMA Error */ if (stat & SAFE_INT_PE_ERROR) { DPRINTF(("dmaerr dmastat %08x\n", READ_REG(sc, SAFE_PE_DMASTAT))); safestats.st_dmaerr++; safe_totalreset(sc); #if 0 safe_feed(sc); #endif } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); DPRINTF(("%s: wakeup crypto %x\n", __func__, sc->sc_needwakeup)); sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * safe_feed() - post a request to chip */ static void safe_feed(struct safe_softc *sc, struct safe_ringentry *re) { bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE); if (re->re_dst_map != NULL) bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_PREREAD); /* XXX have no smaller granularity */ safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE); #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif sc->sc_nqchip++; if (sc->sc_nqchip > safestats.st_maxqchip) safestats.st_maxqchip = sc->sc_nqchip; /* poke h/w to check descriptor ring, any value can be written */ WRITE_REG(sc, SAFE_HI_RD_DESCR, 0); } #define N(a) (sizeof(a) / sizeof (a[0])) static void safe_setup_enckey(struct safe_session *ses, const void *key) { int i; bcopy(key, ses->ses_key, ses->ses_klen); /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_key); i++) ses->ses_key[i] = htole32(ses->ses_key[i]); } static void safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key, int klen) { SHA1_CTX sha1ctx; int i; hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
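/*
 * hmac_init_ipad()/hmac_init_opad() leave the SHA-1 state after
 * absorbing one block of key XOR ipad (resp. key XOR opad), i.e.
 * the two precomputed contexts of the standard HMAC construction
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 * Saving these intermediate digests lets the engine resume from
 * them instead of re-absorbing the key on every request.
 */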
bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); explicit_bzero(&sha1ctx, sizeof(sha1ctx)); /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_hminner); i++) { ses->ses_hminner[i] = htole32(ses->ses_hminner[i]); ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]); } } #undef N static bool safe_auth_supported(struct safe_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0) return (false); break; default: return (false); } return (true); } static bool safe_cipher_supported(struct safe_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0) return (false); if (csp->csp_ivlen != 16) return (false); if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 && csp->csp_cipher_klen != 32) return (false); break; } return (true); } static int safe_probesession(device_t dev, const struct crypto_session_params *csp) { struct safe_softc *sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!safe_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!safe_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!safe_auth_supported(sc, csp) || !safe_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Allocate a new 'session'. */ static int safe_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct safe_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_cipher_alg != 0) { ses->ses_klen = csp->csp_cipher_klen; if (csp->csp_cipher_key != NULL) safe_setup_enckey(ses, csp->csp_cipher_key); } if (csp->csp_auth_alg != 0) { ses->ses_mlen = csp->csp_auth_mlen; if (ses->ses_mlen == 0) { ses->ses_mlen = SHA1_HASH_LEN; } if (csp->csp_auth_key != NULL) { safe_setup_mackey(ses, csp->csp_auth_alg, csp->csp_auth_key, csp->csp_auth_klen); } } return (0); } static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { struct safe_operand *op = arg; DPRINTF(("%s: nsegs %d error %d\n", __func__, nsegs, error)); if (error != 0) return; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int safe_process(device_t dev, struct cryptop *crp, int hint) { struct safe_softc *sc = device_get_softc(dev); const struct crypto_session_params *csp; int err = 0, i, nicealign, uniform; int bypass, oplen; int16_t coffset; struct safe_session *ses; struct safe_ringentry *re; struct safe_sarec *sa; struct safe_pdesc *pd; u_int32_t cmd0, cmd1, staterec; mtx_lock(&sc->sc_ringmtx); if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) { safestats.st_ringfull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_ringmtx); return (ERESTART); } re = sc->sc_front; staterec = re->re_sa.sa_staterec; /* save */ /* NB: zero everything but the PE descriptor */ bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc)); re->re_sa.sa_staterec = staterec; /* restore */ re->re_crp = crp; sa = &re->re_sa; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ cmd1 = 0; switch (csp->csp_mode) { case CSP_MODE_DIGEST: cmd0 |= SAFE_SA_CMD0_OP_HASH; break; case CSP_MODE_CIPHER: cmd0 |= SAFE_SA_CMD0_OP_CRYPT; break; case CSP_MODE_ETA: cmd0 |= 
SAFE_SA_CMD0_OP_BOTH; break; } if (csp->csp_cipher_alg != 0) { if (crp->crp_cipher_key != NULL) safe_setup_enckey(ses, crp->crp_cipher_key); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: cmd0 |= SAFE_SA_CMD0_AES; cmd1 |= SAFE_SA_CMD1_CBC; if (ses->ses_klen * 8 == 128) cmd1 |= SAFE_SA_CMD1_AES128; else if (ses->ses_klen * 8 == 192) cmd1 |= SAFE_SA_CMD1_AES192; else cmd1 |= SAFE_SA_CMD1_AES256; } /* * Setup encrypt/decrypt state. When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. */ crypto_read_iv(crp, re->re_sastate.sa_saved_iv); cmd0 |= SAFE_SA_CMD0_IVLD_STATE; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { cmd0 |= SAFE_SA_CMD0_OUTBOUND; /* * XXX: I suspect we don't need this since we * don't save the returned IV. */ cmd0 |= SAFE_SA_CMD0_SAVEIV; } else { cmd0 |= SAFE_SA_CMD0_INBOUND; } /* * For basic encryption use the zero pad algorithm. * This pads results to an 8-byte boundary and * suppresses padding verification for inbound (i.e. * decrypt) operations. * * NB: Not sure if the 8-byte pad boundary is a problem. */ cmd0 |= SAFE_SA_CMD0_PAD_ZERO; /* XXX assert key bufs have the same size */ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); } if (csp->csp_auth_alg != 0) { if (crp->crp_auth_key != NULL) { safe_setup_mackey(ses, csp->csp_auth_alg, crp->crp_auth_key, csp->csp_auth_klen); } switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: cmd0 |= SAFE_SA_CMD0_SHA1; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ break; } /* * Digest data is loaded from the SA and the hash * result is saved to the state block where we * retrieve it for return to the caller. */ /* XXX assert digest bufs have the same size */ bcopy(ses->ses_hminner, sa->sa_indigest, sizeof(sa->sa_indigest)); bcopy(ses->ses_hmouter, sa->sa_outdigest, sizeof(sa->sa_outdigest)); cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; re->re_flags |= SAFE_QFLAGS_COPYOUTICV; } if (csp->csp_mode == CSP_MODE_ETA) { /* * The driver only supports ETA requests where there * is no gap between the AAD and payload. */ if (crp->crp_aad_length != 0 && crp->crp_aad_start + crp->crp_aad_length != crp->crp_payload_start) { safestats.st_lenmismatch++; err = EINVAL; goto errout; } if (crp->crp_aad_length != 0) bypass = crp->crp_aad_start; else bypass = crp->crp_payload_start; coffset = crp->crp_aad_length; oplen = crp->crp_payload_start + crp->crp_payload_length; #ifdef SAFE_DEBUG if (safe_debug) { printf("AAD: skip %d, len %d, digest %d\n", crp->crp_aad_start, crp->crp_aad_length, crp->crp_digest_start); printf("payload: skip %d, len %d, IV %d\n", crp->crp_payload_start, crp->crp_payload_length, crp->crp_iv_start); printf("bypass %d coffset %d oplen %d\n", bypass, coffset, oplen); } #endif if (coffset & 3) { /* offset must be 32-bit aligned */ DPRINTF(("%s: coffset %u misaligned\n", __func__, coffset)); safestats.st_coffmisaligned++; err = EINVAL; goto errout; } coffset >>= 2; if (coffset > 255) { /* offset must be <256 dwords */ DPRINTF(("%s: coffset %u too big\n", __func__, coffset)); safestats.st_cofftoobig++; err = EINVAL; goto errout; } /* * Tell the hardware to copy the header to the output. * The header is defined as the data from the end of * the bypass to the start of data to be encrypted. * Typically this is the inline IV. 
Note that you need * to do this even if src+dst are the same; it appears * that w/o this bit the crypted data is written * immediately after the bypass data. */ cmd1 |= SAFE_SA_CMD1_HDRCOPY; /* * Disable IP header mutable bit handling. This is * needed to get correct HMAC calculations. */ cmd1 |= SAFE_SA_CMD1_MUTABLE; } else { bypass = crp->crp_payload_start; oplen = bypass + crp->crp_payload_length; coffset = 0; } /* XXX verify multiple of 4 when using s/g */ if (bypass > 96) { /* bypass offset must be <= 96 bytes */ DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); safestats.st_bypasstoobig++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf); nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); DPRINTF(("src nicealign %u uniform %u nsegs %u\n", nicealign, uniform, re->re_src.nsegs)); if (re->re_src.nsegs > 1) { re->re_desc.d_src = sc->sc_spalloc.dma_paddr + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); for (i = 0; i < re->re_src_nsegs; i++) { /* NB: no need to check if there's space */ pd = sc->sc_spfree; if (++(sc->sc_spfree) == sc->sc_springtop) sc->sc_spfree = sc->sc_spring; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus source particle descriptor; flags %x", pd->pd_flags)); pd->pd_addr = re->re_src_segs[i].ds_addr; pd->pd_size = re->re_src_segs[i].ds_len; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_IGATHER; } else { /* * No need for gather, reference the operand directly. */ re->re_desc.d_src = re->re_src_segs[0].ds_addr; } if (csp->csp_mode == CSP_MODE_DIGEST) { /* * Hash op; no destination needed. */ } else { if (nicealign && uniform == 1) { /* * Source layout is suitable for direct * sharing of the DMA map and segment list. */ re->re_dst = re->re_src; } else if (nicealign && uniform == 2) { /* * The source is properly aligned but requires a * different particle list to handle DMA of the * result. Create a new map and do the load to * create the segment list. The particle * descriptor setup code below will handle the * rest. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map, crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; /* * DMA constraints require that we allocate a * new mbuf chain for the destination. We * allocate an entire new set of mbufs of * optimal/required size and then tell the * hardware to copy any bits that are not * created as a byproduct of the operation. 
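* (That copy is done by safe_mcopy() once the chain is built; it is
* skipped, and st_noicvcopy counted, when the trailing bytes are
* exactly the ICV the hardware will write on completion.)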
*/ if (!nicealign) safestats.st_unaligned++; if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf, M_NOWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_NOWAIT, MT_DATA); } if (m == NULL) { safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { m_free(m); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = &top; while (totlen > 0) { if (top) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(top); safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { *mp = m; m_freem(top); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } re->re_dst_m = top; if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat, re->re_dst_map, top, re->re_dst_segs, &re->re_dst_nsegs, 0) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } re->re_dst_mapsize = re->re_src_mapsize; if (re->re_src.mapsize > oplen) { /* * There's data following what the * hardware will copy for us. If this * isn't just the ICV (that's going to * be written on completion), copy it * to the new mbufs */ if (!(csp->csp_mode == CSP_MODE_ETA && (re->re_src.mapsize-oplen) == ses->ses_mlen && crp->crp_digest_start == oplen)) safe_mcopy(crp->crp_buf.cb_mbuf, re->re_dst_m, oplen); else safestats.st_noicvcopy++; } } else { if (!nicealign) { safestats.st_iovmisaligned++; err = EINVAL; goto errout; } else { /* * There's no way to handle the DMA * requirements with this uio. We * could create a separate DMA area for * the result and then copy it back, * but for now we just bail and return * an error. Note that uio requests * > SAFE_MAX_DSIZE are handled because * the DMA map and segment list for the * destination will result in a * destination particle list that does * the necessary scatter DMA. */ safestats.st_iovnotuniform++; err = EINVAL; goto errout; } } if (re->re_dst.nsegs > 1) { re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); for (i = 0; i < re->re_dst_nsegs; i++) { pd = sc->sc_dpfree; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus dest particle descriptor; flags %x", pd->pd_flags)); if (++(sc->sc_dpfree) == sc->sc_dpringtop) sc->sc_dpfree = sc->sc_dpring; pd->pd_addr = re->re_dst_segs[i].ds_addr; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_OSCATTER; } else { /* * No need for scatter, reference the operand directly. */ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; } } /* * All done with setup; fill in the SA command words * and the packet engine descriptor. The operation * is now ready for submission to the hardware. */ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; sa->sa_cmd1 = cmd1 | (coffset << SAFE_SA_CMD1_OFFSET_S) | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ | SAFE_SA_CMD1_SRPCI ; /* * NB: the order of writes is important here.
In case the * chip is scanning the ring because of an outstanding request * it might nab this one too. In that case we need to make * sure the setup is complete before we write the length * field of the descriptor as it signals the descriptor is * ready for processing. */ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; if (csp->csp_auth_alg != 0) re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; re->re_desc.d_len = oplen | SAFE_PE_LEN_READY | (bypass << SAFE_PE_LEN_BYPASS_S) ; safestats.st_ipackets++; safestats.st_ibytes += oplen; if (++(sc->sc_front) == sc->sc_ringtop) sc->sc_front = sc->sc_ring; /* XXX honor batching */ safe_feed(sc, re); mtx_unlock(&sc->sc_ringmtx); return (0); errout: if (re->re_dst_m != NULL) m_freem(re->re_dst_m); if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } if (re->re_src_map != NULL) { bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); } mtx_unlock(&sc->sc_ringmtx); if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); + err = 0; } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void safe_callback(struct safe_softc *sc, struct safe_ringentry *re) { const struct crypto_session_params *csp; struct cryptop *crp = (struct cryptop *)re->re_crp; struct safe_session *ses; uint8_t hash[HASH_MAX_LEN]; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); safestats.st_opackets++; safestats.st_obytes += re->re_dst.mapsize; safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", re->re_desc.d_csr, re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); safestats.st_peoperr++; crp->crp_etype = EIO; /* something more meaningful? */ } /* * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if * it is non-NULL? */ if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) { /* * SHA-1 ICV's are byte-swapped; fix 'em up * before copying them to their destination. */ re->re_sastate.sa_saved_indigest[0] = bswap32(re->re_sastate.sa_saved_indigest[0]); re->re_sastate.sa_saved_indigest[1] = bswap32(re->re_sastate.sa_saved_indigest[1]); re->re_sastate.sa_saved_indigest[2] = bswap32(re->re_sastate.sa_saved_indigest[2]); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen, hash); if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest, hash, ses->ses_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen, re->re_sastate.sa_saved_indigest); } crypto_done(crp); } /* * Copy all data past offset from srcm to dstm. */ static void safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) { u_int j, dlen, slen; caddr_t dptr, sptr; /* * Advance src and dst to offset. 
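* (The two chains are advanced independently since src and dst mbuf
* segment lengths need not line up; the loop below then copies
* min(slen, dlen) bytes at a time.)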
*/ j = offset; while (j >= srcm->m_len) { j -= srcm->m_len; srcm = srcm->m_next; if (srcm == NULL) return; } sptr = mtod(srcm, caddr_t) + j; slen = srcm->m_len - j; j = offset; while (j >= dstm->m_len) { j -= dstm->m_len; dstm = dstm->m_next; if (dstm == NULL) return; } dptr = mtod(dstm, caddr_t) + j; dlen = dstm->m_len - j; /* * Copy everything that remains. */ for (;;) { j = min(slen, dlen); bcopy(sptr, dptr, j); if (slen == j) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } else sptr += j, slen -= j; if (dlen == j) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } else dptr += j, dlen -= j; } } #ifndef SAFE_NO_RNG #define SAFE_RNG_MAXWAIT 1000 static void safe_rng_init(struct safe_softc *sc) { u_int32_t w, v; int i; WRITE_REG(sc, SAFE_RNG_CTRL, 0); /* use default value according to the manual */ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); /* * There is a bug in rev 1.0 of the 1140 that when the RNG * is brought out of reset the ready status flag does not * work until the RNG has finished its internal initialization. * * So in order to determine the device is through its * initialization we must read the data register, using the * status reg in the read in case it is initialized. Then read * the data register until it changes from the first read. * Once it changes read the data register until it changes * again. At this time the RNG is considered initialized. * This could take between 750ms - 1000ms in time. */ i = 0; w = READ_REG(sc, SAFE_RNG_OUT); do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) { w = v; break; } DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); /* Wait Until data changes again */ i = 0; do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) break; DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); } static __inline void safe_rng_disable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN); } static __inline void safe_rng_enable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN); } static __inline u_int32_t safe_rng_read(struct safe_softc *sc) { int i; i = 0; while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT) ; return READ_REG(sc, SAFE_RNG_OUT); } static void safe_rng(void *arg) { struct safe_softc *sc = arg; u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */ u_int maxwords; int i; safestats.st_rng++; /* * Fetch the next block of data. */ maxwords = safe_rngbufsize; if (maxwords > SAFE_RNG_MAXBUFSIZ) maxwords = SAFE_RNG_MAXBUFSIZ; retry: for (i = 0; i < maxwords; i++) buf[i] = safe_rng_read(sc); /* * Check the comparator alarm count and reset the h/w if * it exceeds our threshold. This guards against the * hardware oscillators resonating with external signals. 
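* (Recovery below shortens the oscillator cycle and steps the RNG
* configuration frequency up to 64 times until the alarm count
* reads zero, then resumes normal operation and retries the fetch.)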
if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) { u_int32_t freq_inc, w; DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__, READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm)); safestats.st_rngalarm++; safe_rng_enable_short_cycle(sc); freq_inc = 18; for (i = 0; i < 64; i++) { w = READ_REG(sc, SAFE_RNG_CNFG); freq_inc = ((w + freq_inc) & 0x3fL); w = ((w & ~0x3fL) | freq_inc); WRITE_REG(sc, SAFE_RNG_CNFG, w); WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (void) safe_rng_read(sc); DELAY(25); if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) { safe_rng_disable_short_cycle(sc); goto retry; } freq_inc = 1; } safe_rng_disable_short_cycle(sc); } else WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t)); callout_reset(&sc->sc_rngto, hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc); } #endif /* SAFE_NO_RNG */ static void safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int safe_dma_malloc( struct safe_softc *sc, bus_size_t size, struct safe_dma_alloc *dma, int mapflags ) { int r; r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ sizeof(u_int32_t), 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamem_alloc failed; size %ju, error %u\n", (uintmax_t)size, r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, safe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the registers are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void safe_reset_board(struct safe_softc *sc) { u_int32_t v; /* * Reset the device. The manual says no delay * is needed between marking and clearing reset. */ v = READ_REG(sc, SAFE_PE_DMACFG) &~ (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v); } /* * Initialize registers we need to touch only once.
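* (safe_init_board() below programs endian swapping, the ring bases,
* ring size and particle rings, and the interrupt mode; PE mode
* itself is enabled only once everything else is configured.)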
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
			SAFE_REV_MAJ(sc->sc_chiprev),
			SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */
	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers.
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * Free a safe_q.
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}
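/*
 * Illustrative sketch, not part of the driver: the circular-ring walk
 * performed by safe_cleanchip() above (and safe_dump_ring() below).
 * sc_back chases sc_front through the fixed ring array, wrapping at
 * sc_ringtop; visit() is a hypothetical per-entry callback.
 */
#if 0
	struct safe_ringentry *re;

	for (re = sc->sc_back; re != sc->sc_front;) {
		visit(sc, re);
		if (++re == sc->sc_ringtop)	/* wrap at end of array */
			re = sc->sc_ring;
	}
#endif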
/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation?  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
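/*
 * Worked examples, not part of the driver: results of
 * safe_dmamap_uniform() for some hypothetical segment lists, assuming
 * SAFE_MAX_DSIZE is an mbuf cluster (2048 bytes).  Only the last
 * segment is unconstrained.
 *
 *	{ 2048, 2048, 100 }	-> 1 (every earlier segment exact)
 *	{ 4096, 2048, 100 }	-> 2 (multiples, but not all exact)
 *	{ 1000, 2048, 100 }	-> 0 (must bounce through a copy)
 */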
#ifdef SAFE_DEBUG

static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;
		if (sc == NULL)
			return (EINVAL);
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return (EINVAL);
	}
	return (error);
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A", "Dump driver state");
#endif /* SAFE_DEBUG */
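/*
 * Illustrative usage, not part of the driver: with SAFE_DEBUG compiled
 * in, the handler above accepts the strings "dma", "int", and "ring",
 * e.g.:
 *
 *	sysctl hw.safe.dump=ring
 */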