D12723.id34166.diff
Index: sys/crypto/ccp/ccp.h
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp.h
@@ -0,0 +1,223 @@
+/*-
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#pragma once
+
+/*
+ * Keccak SHAKE128 (if supported by the device?) uses a 1344 bit block.
+ * SHA3-224 is the next largest block size, at 1152 bits. However, crypto(4)
+ * doesn't support any SHA3 hash, so SHA2 is the constraint:
+ */
+#define CCP_HASH_MAX_BLOCK_SIZE (SHA2_512_HMAC_BLOCK_LEN)
+
+#define CCP_AES_MAX_KEY_LEN (AES_XTS_MAX_KEY)
+#define CCP_MAX_CRYPTO_IV_LEN 16 /* AES? */
+
+#define MAX_HW_QUEUES 5
+#define MAX_LSB_REGIONS 8
+
+/*
+ * Internal data structures.
+ */
+enum sha_version {
+ SHA1,
+#if 0
+ SHA2_224,
+#endif
+ SHA2_256, SHA2_384, SHA2_512
+};
+
+struct ccp_session_hmac {
+ struct auth_hash *auth_hash;
+ int hash_len;
+ unsigned int partial_digest_len;
+ unsigned int auth_mode;
+ unsigned int mk_size;
+ char ipad[CCP_HASH_MAX_BLOCK_SIZE];
+ char opad[CCP_HASH_MAX_BLOCK_SIZE];
+};
+
+struct ccp_session_gmac {
+ int hash_len;
+ char ghash_h[GMAC_BLOCK_LEN];
+};
+
+struct ccp_session_blkcipher {
+ unsigned cipher_mode;
+ unsigned cipher_type;
+ unsigned key_len;
+ unsigned iv_len;
+ char enckey[CCP_AES_MAX_KEY_LEN];
+ char deckey[CCP_AES_MAX_KEY_LEN];
+ char iv[CCP_MAX_CRYPTO_IV_LEN];
+};
+
+struct ccp_session {
+ bool active : 1;
+ bool cipher_first : 1;
+ int pending;
+ enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
+ unsigned queue;
+ union {
+ struct ccp_session_hmac hmac;
+ struct ccp_session_gmac gmac;
+ };
+ struct ccp_session_blkcipher blkcipher;
+};
+
+struct ccp_softc;
+struct ccp_queue {
+ struct mtx cq_lock;
+ unsigned cq_qindex;
+ struct ccp_softc *cq_softc;
+
+ /* Host memory and tracking structures for descriptor ring. */
+ bus_dma_tag_t ring_desc_tag;
+ bus_dmamap_t ring_desc_map;
+ struct ccp_desc *desc_ring;
+ bus_addr_t desc_ring_bus_addr;
+ /* Callbacks and arguments ring; indices correspond to above ring. */
+ struct ccp_completion_ctx *completions_ring;
+
+ uint32_t qcontrol; /* Cached register value */
+ unsigned lsb_mask; /* LSBs available to queue */
+ int private_lsb; /* Reserved LSB #, or -1 */
+
+ unsigned cq_head;
+ unsigned cq_tail;
+ unsigned cq_acq_tail;
+
+ struct sglist *cq_sg_crp;
+ struct sglist *cq_sg_ulptx;
+ struct sglist *cq_sg_dst;
+};
+
+struct ccp_completion_ctx {
+ void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s,
+ void *arg, int error);
+ void *callback_arg;
+ struct ccp_session *session;
+};
+
+struct ccp_softc {
+ device_t dev;
+ int32_t cid;
+ struct ccp_session *sessions;
+ int nsessions;
+ struct mtx lock;
+ bool detaching;
+
+ unsigned ring_size_order;
+
+ /*
+ * Each command queue is either public or private. "Private"
+ * (PSP-only) by default. PSP grants access to some queues to host via
+ * QMR (Queue Mask Register). Set bits are host accessible.
+ */
+ uint8_t valid_queues;
+
+ uint8_t hw_version;
+ uint8_t num_queues;
+ uint16_t hw_features;
+ uint16_t num_lsb_entries;
+
+ /* Primary BAR (RID 2) used for register access */
+ bus_space_tag_t pci_bus_tag;
+ bus_space_handle_t pci_bus_handle;
+ int pci_resource_id;
+ struct resource *pci_resource;
+
+ /* Secondary BAR (RID 5) apparently used for MSI-X */
+ int pci_resource_id_msix;
+ struct resource *pci_resource_msix;
+
+ /* Interrupt resources */
+ void *intr_tag;
+ struct resource *intr_res;
+
+ struct ccp_queue queues[MAX_HW_QUEUES];
+};
+
+/* Internal globals */
+SYSCTL_DECL(_hw_ccp);
+MALLOC_DECLARE(M_CCP);
+extern struct ccp_softc *g_ccp_softc;
+
+/*
+ * Internal hardware manipulation routines.
+ */
+int ccp_hw_attach(device_t dev);
+void ccp_hw_detach(device_t dev);
+
+void ccp_queue_write_tail(struct ccp_queue *qp);
+
+#ifdef DDB
+void db_ccp_show_hw(struct ccp_softc *sc);
+void db_ccp_show_queue_hw(struct ccp_queue *qp);
+#endif
+
+/*
+ * Internal hardware crypt-op submission routines.
+ */
+int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s,
+ struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde);
+int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s,
+ struct cryptop *crp);
+int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp,
+ struct cryptodesc *crda, struct cryptodesc *crde);
+int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp);
+
+/*
+ * Internal hardware TRNG read routine.
+ */
+u_int random_ccp_read(void *v, u_int c);
+
+/* XXX */
+int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags);
+void ccp_queue_release(struct ccp_queue *qp);
+
+void XXX_ccp_test(struct ccp_softc *sc);
+
+/*
+ * Internal inline routines.
+ */
+static inline unsigned
+ccp_queue_get_active(struct ccp_queue *qp)
+{
+ struct ccp_softc *sc;
+
+ sc = qp->cq_softc;
+ return ((qp->cq_head - qp->cq_tail) & ((1 << sc->ring_size_order) - 1));
+}
+
+static inline unsigned
+ccp_queue_get_ring_space(struct ccp_queue *qp)
+{
+ struct ccp_softc *sc;
+
+ sc = qp->cq_softc;
+ return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1);
+}
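+
+/*
+ * Worked example of the ring arithmetic above (illustrative only): with the
+ * default ring_size_order of 11 (2048 descriptors), cq_head == 5 and
+ * cq_tail == 2040 give (5 - 2040) & 2047 == 13 active descriptors and
+ * 2048 - 13 - 1 == 2034 free ring slots.
+ */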
Index: sys/crypto/ccp/ccp.c
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp.c
@@ -0,0 +1,977 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/module.h>
+#include <sys/random.h>
+#include <sys/sglist.h>
+#include <sys/sysctl.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include <dev/pci/pcivar.h>
+
+#include <dev/random/randomdev.h>
+
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+
+#include "cryptodev_if.h"
+
+#include "ccp.h"
+#include "ccp_hardware.h"
+
+MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");
+
+/*
+ * Need a global softc available for garbage random_source API, which lacks any
+ * context pointer. It's also handy for debugging.
+ */
+struct ccp_softc *g_ccp_softc;
+
+static struct pciid {
+ uint32_t devid;
+ const char *desc;
+} ccp_ids[] = {
+ { 0x14561022, "AMD CCP-5a" },
+ { 0x14681022, "AMD CCP-5b" },
+};
+MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]),
+ nitems(ccp_ids));
+
+static struct random_source random_ccp = {
+ .rs_ident = "AMD CCP TRNG",
+ .rs_source = RANDOM_PURE_CCP,
+ .rs_read = random_ccp_read,
+};
+
+/*
+ * ccp_populate_sglist() generates a scatter/gather list that covers the entire
+ * crypto operation buffer.
+ */
+static int
+ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
+{
+ int error;
+
+ sglist_reset(sg);
+ if (crp->crp_flags & CRYPTO_F_IMBUF)
+ error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
+ else if (crp->crp_flags & CRYPTO_F_IOV)
+ error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
+ else
+ error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
+ return (error);
+}
+
+/*
+ * Handle a GCM request with an empty payload by performing the
+ * operation in software. Derived from swcr_authenc().
+ */
+static void
+ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
+ struct cryptodesc *crda, struct cryptodesc *crde)
+{
+ struct aes_gmac_ctx gmac_ctx;
+ char block[GMAC_BLOCK_LEN];
+ char digest[GMAC_DIGEST_LEN];
+ char iv[AES_BLOCK_LEN];
+ int i, len;
+
+ /*
+ * This assumes a 12-byte IV from the crp. See longer comment
+ * above in ccp_gcm() for more details.
+ */
+ if (crde->crd_flags & CRD_F_ENCRYPT) {
+ if (crde->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(iv, crde->crd_iv, 12);
+ else
+ arc4rand(iv, 12, 0);
+ } else {
+ if (crde->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(iv, crde->crd_iv, 12);
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crde->crd_inject, 12, iv);
+ }
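+	/* With a 96-bit IV, the initial GCM counter block is IV || 0x00000001. */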
+ *(uint32_t *)&iv[12] = htobe32(1);
+
+ /* Initialize the MAC. */
+ AES_GMAC_Init(&gmac_ctx);
+ AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
+ AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));
+
+ /* MAC the AAD. */
+ for (i = 0; i < crda->crd_len; i += sizeof(block)) {
+ len = imin(crda->crd_len - i, sizeof(block));
+ crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
+ i, len, block);
+ bzero(block + len, sizeof(block) - len);
+ AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
+ }
+
+ /* Length block. */
+ bzero(block, sizeof(block));
+ ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
+ AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
+ AES_GMAC_Final(digest, &gmac_ctx);
+
+ if (crde->crd_flags & CRD_F_ENCRYPT) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
+ sizeof(digest), digest);
+ crp->crp_etype = 0;
+ } else {
+ char digest2[GMAC_DIGEST_LEN];
+
+ crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
+ sizeof(digest2), digest2);
+ if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
+ crp->crp_etype = 0;
+ else
+ crp->crp_etype = EBADMSG;
+ }
+ crypto_done(crp);
+}
+
+static int
+ccp_probe(device_t dev)
+{
+ struct pciid *ip;
+ uint32_t id;
+
+ id = pci_get_devid(dev);
+ for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
+ if (id == ip->devid) {
+ device_set_desc(dev, ip->desc);
+ return (0);
+ }
+ }
+ return (ENXIO);
+}
+
+static void
+ccp_initialize_queue(struct ccp_queue *qp)
+{
+ mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
+ /* XXX - arbitrarily chosen sizes */
+ qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
+ /* Two more SGLs than sg_crp to accommodate ipad. */
+ qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
+ qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
+}
+
+static void
+ccp_free_queue(struct ccp_queue *qp)
+{
+ mtx_destroy(&qp->cq_lock);
+ sglist_free(qp->cq_sg_crp);
+ sglist_free(qp->cq_sg_ulptx);
+ sglist_free(qp->cq_sg_dst);
+}
+
+static int
+ccp_attach(device_t dev)
+{
+ struct ccp_softc *sc;
+ size_t i;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ device_printf(dev, "XXX%s: sc=%p\n", __func__, sc);
+
+ /* XXX - Remove SYNC when interrupts are implemented */
+ sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
+ CRYPTOCAP_F_SYNC);
+ if (sc->cid < 0) {
+ device_printf(dev, "could not get crypto driver id\n");
+ return (ENXIO);
+ }
+
+ error = ccp_hw_attach(dev);
+ if (error != 0)
+ return (error);
+
+ mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);
+
+ for (i = 0; i < nitems(sc->queues); i++) {
+ sc->queues[i].cq_softc = sc;
+ sc->queues[i].cq_qindex = i;
+ ccp_initialize_queue(&sc->queues[i]);
+ }
+
+ if (g_ccp_softc == NULL) {
+ g_ccp_softc = sc;
+ if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
+ random_source_register(&random_ccp);
+ }
+
+ XXX_ccp_test(sc);
+
+ if ((sc->hw_features & VERSION_CAP_AES) != 0) {
+ crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
+#if 0
+ crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
+ crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
+#endif
+#if 0
+ crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
+#endif
+ }
+ if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
+ crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
+ crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
+ }
+
+ return (0);
+}
+
+static int
+ccp_detach(device_t dev)
+{
+ struct ccp_softc *sc;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ mtx_lock(&sc->lock);
+ for (i = 0; i < sc->nsessions; i++) {
+ if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
+ mtx_unlock(&sc->lock);
+ return (EBUSY);
+ }
+ }
+ sc->detaching = true;
+ mtx_unlock(&sc->lock);
+
+ crypto_unregister_all(sc->cid);
+ if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
+ random_source_deregister(&random_ccp);
+
+ ccp_hw_detach(dev);
+ for (i = 0; i < (int)nitems(sc->queues); i++)
+ ccp_free_queue(&sc->queues[i]);
+
+ if (g_ccp_softc == sc)
+ g_ccp_softc = NULL;
+
+ free(sc->sessions, M_CCP);
+ mtx_destroy(&sc->lock);
+ return (0);
+}
+
+static void
+ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
+ int klen)
+{
+ union authctx auth_ctx;
+ struct auth_hash *axf;
+ u_int i;
+
+ /*
+ * If the key is larger than the block size, use the digest of
+ * the key as the key instead.
+ */
+ axf = s->hmac.auth_hash;
+ klen /= 8;
+ if (klen > axf->blocksize) {
+ axf->Init(&auth_ctx);
+ axf->Update(&auth_ctx, key, klen);
+ axf->Final(s->hmac.ipad, &auth_ctx);
+ explicit_bzero(&auth_ctx, sizeof(auth_ctx));
+ klen = axf->hashsize;
+ } else
+ memcpy(s->hmac.ipad, key, klen);
+
+ memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
+ memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);
+
+ for (i = 0; i < axf->blocksize; i++) {
+ s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
+ s->hmac.opad[i] ^= HMAC_OPAD_VAL;
+ }
+}
+
+/*
+ * Borrowed from AES_GMAC_Setkey().
+ */
+static void
+ccp_init_gmac_hash(struct ccp_session *s, char *key, int klen)
+{
+ static char zeroes[GMAC_BLOCK_LEN];
+ uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
+ int rounds;
+
+ rounds = rijndaelKeySetupEnc(keysched, key, klen);
+ rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
+}
+
+static int
+ccp_aes_check_keylen(int alg, int klen)
+{
+
+ switch (klen) {
+ case 128:
+ case 192:
+ if (alg == CRYPTO_AES_XTS)
+ return (EINVAL);
+ break;
+ case 256:
+ break;
+ case 512:
+ if (alg != CRYPTO_AES_XTS)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * Borrowed from cesa_prep_aes_key(). We should perhaps have a public
+ * function to generate this instead.
+ *
+ * NB: The crypto engine wants the words in the decryption key in reverse
+ * order.
+ */
+static void
+ccp_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
+{
+ uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
+ uint32_t *dkey;
+ int i;
+
+ rijndaelKeySetupEnc(ek, enc_key, kbits);
+ dkey = dec_key;
+ dkey += (kbits / 8) / 4;
+
+ switch (kbits) {
+ case 128:
+ for (i = 0; i < 4; i++)
+ *--dkey = htobe32(ek[4 * 10 + i]);
+ break;
+ case 192:
+ for (i = 0; i < 2; i++)
+ *--dkey = htobe32(ek[4 * 11 + 2 + i]);
+ for (i = 0; i < 4; i++)
+ *--dkey = htobe32(ek[4 * 12 + i]);
+ break;
+ case 256:
+ for (i = 0; i < 4; i++)
+ *--dkey = htobe32(ek[4 * 13 + i]);
+ for (i = 0; i < 4; i++)
+ *--dkey = htobe32(ek[4 * 14 + i]);
+ break;
+ }
+ MPASS(dkey == dec_key);
+}
+
+static void
+ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
+{
+ unsigned kbits;
+
+ if (alg == CRYPTO_AES_XTS)
+ kbits = klen / 2;
+ else
+ kbits = klen;
+
+ switch (kbits) {
+ case 128:
+ s->blkcipher.cipher_type = CCP_AES_TYPE_128;
+ break;
+ case 192:
+ s->blkcipher.cipher_type = CCP_AES_TYPE_192;
+ break;
+ case 256:
+ s->blkcipher.cipher_type = CCP_AES_TYPE_256;
+ break;
+ default:
+ panic("should not get here");
+ }
+
+ s->blkcipher.key_len = klen / 8;
+ memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
+ switch (alg) {
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_XTS:
+ ccp_aes_getdeckey(s->blkcipher.deckey, key, kbits);
+ break;
+ }
+}
+
+static int
+ccp_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
+{
+ struct ccp_softc *sc;
+ struct ccp_session *s;
+ struct auth_hash *auth_hash;
+ struct cryptoini *c, *hash, *cipher;
+ enum ccp_aes_mode cipher_mode;
+ unsigned auth_mode, iv_len;
+ unsigned partial_digest_len;
+ unsigned q;
+ int error, i, sess;
+ bool gcm_hash;
+
+ if (sidp == NULL || cri == NULL)
+ return (EINVAL);
+
+ gcm_hash = false;
+ cipher = NULL;
+ hash = NULL;
+ auth_hash = NULL;
+ /* XXX reconcile auth_mode with use by ccp_sha */
+ auth_mode = 0;
+ cipher_mode = CCP_AES_MODE_ECB;
+ iv_len = 0;
+ partial_digest_len = 0;
+ for (c = cri; c != NULL; c = c->cri_next) {
+ switch (c->cri_alg) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ if (hash)
+ return (EINVAL);
+ hash = c;
+ switch (c->cri_alg) {
+ case CRYPTO_SHA1_HMAC:
+ auth_hash = &auth_hash_hmac_sha1;
+ auth_mode = SHA1;
+ partial_digest_len = SHA1_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_256;
+ auth_mode = SHA2_256;
+ partial_digest_len = SHA2_256_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_384;
+ auth_mode = SHA2_384;
+ partial_digest_len = SHA2_512_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_512;
+ auth_mode = SHA2_512;
+ partial_digest_len = SHA2_512_HASH_LEN;
+ break;
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ gcm_hash = true;
+#if 0
+ auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
+#endif
+ break;
+ }
+ break;
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_XTS:
+ if (cipher)
+ return (EINVAL);
+ cipher = c;
+ switch (c->cri_alg) {
+ case CRYPTO_AES_CBC:
+ cipher_mode = CCP_AES_MODE_CBC;
+ iv_len = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_AES_ICM:
+ cipher_mode = CCP_AES_MODE_CTR;
+ iv_len = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ cipher_mode = CCP_AES_MODE_GCTR;
+ iv_len = AES_GCM_IV_LEN;
+ break;
+ case CRYPTO_AES_XTS:
+#if 0
+ cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+#endif
+ iv_len = AES_BLOCK_LEN;
+ break;
+ }
+ if (c->cri_key != NULL) {
+ error = ccp_aes_check_keylen(c->cri_alg,
+ c->cri_klen);
+ if (error != 0)
+ return (error);
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+ }
+ if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
+ return (EINVAL);
+ if (hash == NULL && cipher == NULL)
+ return (EINVAL);
+ if (hash != NULL && hash->cri_key == NULL)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->lock);
+ if (sc->detaching) {
+ mtx_unlock(&sc->lock);
+ return (ENXIO);
+ }
+ sess = -1;
+ for (i = 0; i < sc->nsessions; i++) {
+ if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
+ sess = i;
+ break;
+ }
+ }
+ if (sess == -1) {
+ s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCP,
+ M_NOWAIT | M_ZERO);
+ if (s == NULL) {
+ mtx_unlock(&sc->lock);
+ return (ENOMEM);
+ }
+ if (sc->sessions != NULL)
+ memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
+ sess = sc->nsessions;
+ free(sc->sessions, M_CCP);
+ sc->sessions = s;
+ sc->nsessions++;
+ }
+
+ s = &sc->sessions[sess];
+
+ /* Just grab the first usable queue for now. */
+ for (q = 0; q < nitems(sc->queues); q++)
+ if ((sc->valid_queues & (1 << q)) != 0)
+ break;
+ if (q == nitems(sc->queues)) {
+ mtx_unlock(&sc->lock);
+ return (ENXIO);
+ }
+ s->queue = q;
+
+ if (gcm_hash)
+ s->mode = GCM;
+ else if (hash != NULL && cipher != NULL)
+ s->mode = AUTHENC;
+ else if (hash != NULL)
+ s->mode = HMAC;
+ else {
+ MPASS(cipher != NULL);
+ s->mode = BLKCIPHER;
+ }
+ if (gcm_hash) {
+ if (hash->cri_mlen == 0)
+ s->gmac.hash_len = AES_GMAC_HASH_LEN;
+ else
+ s->gmac.hash_len = hash->cri_mlen;
+ ccp_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
+ } else if (hash != NULL) {
+ s->hmac.auth_hash = auth_hash;
+ s->hmac.auth_mode = auth_mode;
+ s->hmac.partial_digest_len = partial_digest_len;
+ if (hash->cri_mlen == 0)
+ s->hmac.hash_len = auth_hash->hashsize;
+ else
+ s->hmac.hash_len = hash->cri_mlen;
+ ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
+ hash->cri_klen);
+ }
+ if (cipher != NULL) {
+ s->blkcipher.cipher_mode = cipher_mode;
+ s->blkcipher.iv_len = iv_len;
+ if (cipher->cri_key != NULL)
+ ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
+ cipher->cri_klen);
+ }
+
+ s->active = true;
+ mtx_unlock(&sc->lock);
+
+ *sidp = sess;
+ return (0);
+}
+
+static int
+ccp_freesession(device_t dev, uint64_t tid)
+{
+ struct ccp_softc *sc;
+ uint32_t sid;
+ int error;
+
+ sc = device_get_softc(dev);
+ sid = CRYPTO_SESID2LID(tid);
+ mtx_lock(&sc->lock);
+ if (sid >= sc->nsessions || !sc->sessions[sid].active)
+ error = EINVAL;
+ else {
+ if (sc->sessions[sid].pending != 0)
+ device_printf(dev,
+ "session %d freed with %d pending requests\n", sid,
+ sc->sessions[sid].pending);
+ sc->sessions[sid].active = false;
+ error = 0;
+ }
+ mtx_unlock(&sc->lock);
+ return (error);
+}
+
+static int
+ccp_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct ccp_softc *sc;
+ struct ccp_queue *qp;
+ struct ccp_session *s;
+ struct cryptodesc *crd, *crda, *crde;
+ uint32_t sid;
+ int error;
+
+ qp = NULL;
+ if (crp == NULL)
+ return (EINVAL);
+
+ crd = crp->crp_desc;
+ sid = CRYPTO_SESID2LID(crp->crp_sid);
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->lock);
+ if (sid >= sc->nsessions || !sc->sessions[sid].active) {
+ mtx_unlock(&sc->lock);
+ error = EINVAL;
+ goto out;
+ }
+
+ s = &sc->sessions[sid];
+ qp = &sc->queues[s->queue];
+ mtx_unlock(&sc->lock);
+ mtx_lock(&qp->cq_lock);
+
+ error = ccp_populate_sglist(qp->cq_sg_crp, crp);
+ if (error != 0)
+ goto out;
+
+ switch (s->mode) {
+ case HMAC:
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
+ ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
+ crd->crd_klen);
+ error = ccp_hmac(qp, s, crp);
+ break;
+ case BLKCIPHER:
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ error = ccp_aes_check_keylen(crd->crd_alg,
+ crd->crd_klen);
+ if (error != 0)
+ break;
+ ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
+ crd->crd_klen);
+ }
+ error = ccp_blkcipher(qp, s, crp);
+ break;
+ case AUTHENC:
+ error = 0;
+ switch (crd->crd_alg) {
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
+ case CRYPTO_AES_XTS:
+ /* Only encrypt-then-authenticate supported. */
+ crde = crd;
+ crda = crd->crd_next;
+ if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
+ error = EINVAL;
+ break;
+ }
+ break;
+ default:
+ crda = crd;
+ crde = crd->crd_next;
+ if (crde->crd_flags & CRD_F_ENCRYPT) {
+ error = EINVAL;
+ break;
+ }
+ break;
+ }
+ if (error != 0)
+ break;
+ if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
+ ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
+ crda->crd_klen);
+ if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
+ error = ccp_aes_check_keylen(crde->crd_alg,
+ crde->crd_klen);
+ if (error != 0)
+ break;
+ ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
+ crde->crd_klen);
+ }
+ error = ccp_authenc(qp, s, crp, crda, crde);
+ break;
+ case GCM:
+ error = 0;
+ if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
+ crde = crd;
+ crda = crd->crd_next;
+ } else {
+ crda = crd;
+ crde = crd->crd_next;
+ }
+ if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
+ ccp_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
+ if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
+ error = ccp_aes_check_keylen(crde->crd_alg,
+ crde->crd_klen);
+ if (error != 0)
+ break;
+ ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
+ crde->crd_klen);
+ }
+ if (crde->crd_len == 0) {
+ mtx_unlock(&qp->cq_lock);
+ ccp_gcm_soft(s, crp, crda, crde);
+ return (0);
+ }
+ error = ccp_gcm(qp, s, crp, crda, crde);
+ break;
+ }
+
+ if (error == 0)
+ s->pending++;
+
+out:
+ if (qp != NULL)
+ mtx_unlock(&qp->cq_lock);
+
+ if (error != 0) {
+ crp->crp_etype = error;
+ crypto_done(crp);
+ }
+ return (0);
+}
+
+static device_method_t ccp_methods[] = {
+ DEVMETHOD(device_probe, ccp_probe),
+ DEVMETHOD(device_attach, ccp_attach),
+ DEVMETHOD(device_detach, ccp_detach),
+
+ DEVMETHOD(cryptodev_newsession, ccp_newsession),
+ DEVMETHOD(cryptodev_freesession, ccp_freesession),
+ DEVMETHOD(cryptodev_process, ccp_process),
+
+ DEVMETHOD_END
+};
+
+static driver_t ccp_driver = {
+ "ccp",
+ ccp_methods,
+ sizeof(struct ccp_softc)
+};
+
+static devclass_t ccp_devclass;
+DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
+MODULE_VERSION(ccp, 1);
+MODULE_DEPEND(ccp, crypto, 1, 1, 1);
+MODULE_DEPEND(ccp, random_device, 1, 1, 1);
+
+static int
+ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
+{
+ struct ccp_softc *sc;
+ int error;
+
+ mtx_assert(&qp->cq_lock, MA_OWNED);
+ error = 0;
+ sc = qp->cq_softc;
+
+ if (n < 1 || n >= (1 << sc->ring_size_order))
+ return (EINVAL);
+
+ while (true) {
+ if (ccp_queue_get_ring_space(qp) >= n)
+ return (0);
+ if ((mflags & M_WAITOK) == 0)
+ return (EAGAIN);
+ msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
+ }
+}
+
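+/*
+ * Expected usage sketch: a submitter calls ccp_queue_acquire_reserve() to
+ * lock the queue and reserve ring space, appends its descriptors while
+ * advancing cq_tail, and then calls ccp_queue_release(), which writes the new
+ * tail to the hardware (if anything was queued) and drops the queue lock.
+ */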
+int
+ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
+{
+ int error;
+
+ mtx_lock(&qp->cq_lock);
+ qp->cq_acq_tail = qp->cq_tail;
+ error = ccp_queue_reserve_space(qp, n, mflags);
+ if (error != 0)
+ mtx_unlock(&qp->cq_lock);
+ return (error);
+}
+
+void
+ccp_queue_release(struct ccp_queue *qp)
+{
+
+ mtx_assert(&qp->cq_lock, MA_OWNED);
+ if (qp->cq_tail != qp->cq_acq_tail)
+ ccp_queue_write_tail(qp);
+ mtx_unlock(&qp->cq_lock);
+}
+
+#ifdef DDB
+#define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo)
+#define db_show_lock(lk) _db_show_lock(&(lk)->lock_object)
+static void
+db_show_ccp_sc(struct ccp_softc *sc)
+{
+
+ db_printf("ccp softc at %p\n", sc);
+ db_printf(" cid: %d\n", (int)sc->cid);
+ db_printf(" nsessions: %d\n", sc->nsessions);
+
+ db_printf(" lock: ");
+ db_show_lock(&sc->lock);
+
+ db_printf(" detaching: %d\n", (int)sc->detaching);
+ db_printf(" ring_size_order: %u\n", sc->ring_size_order);
+
+ db_printf(" hw_version: %d\n", (int)sc->hw_version);
+ db_printf(" hw_features: %b\n", (int)sc->hw_features,
+ "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
+ "\11SHA\0103DES\07AES");
+
+ db_printf(" hw status:\n");
+ db_ccp_show_hw(sc);
+}
+
+static void
+db_show_ccp_qp(struct ccp_queue *qp)
+{
+
+ db_printf(" lock: ");
+ db_show_lock(&qp->cq_lock);
+
+#if 0
+ db_printf(" head: %u\n", sc->head);
+ db_printf(" tail: %u\n", sc->tail);
+ db_printf(" hw_head: %u\n", sc->hw_head);
+ db_printf(" last_seen: 0x%lx\n", sc->last_seen);
+ db_printf(" ring: %p\n", sc->ring);
+ db_printf(" descriptors: %p\n", sc->hw_desc_ring);
+ db_printf(" descriptors (phys): 0x%jx\n",
+ (uintmax_t)sc->hw_desc_bus_addr);
+
+ db_printf(" ring[%u] (tail):\n", sc->tail %
+ (1 << sc->ring_size_order));
+ db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
+ db_printf(" addr: 0x%lx\n",
+ RING_PHYS_ADDR(sc, sc->tail));
+ db_printf(" next: 0x%lx\n",
+ ioat_get_descriptor(sc, sc->tail)->generic.next);
+
+ db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) %
+ (1 << sc->ring_size_order));
+ db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
+ db_printf(" addr: 0x%lx\n",
+ RING_PHYS_ADDR(sc, sc->head - 1));
+ db_printf(" next: 0x%lx\n",
+ ioat_get_descriptor(sc, sc->head - 1)->generic.next);
+
+ db_printf(" ring[%u] (head):\n", (sc->head) %
+ (1 << sc->ring_size_order));
+ db_printf(" id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
+ db_printf(" addr: 0x%lx\n",
+ RING_PHYS_ADDR(sc, sc->head));
+ db_printf(" next: 0x%lx\n",
+ ioat_get_descriptor(sc, sc->head)->generic.next);
+
+ for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
+ if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
+ == RING_PHYS_ADDR(sc, idx))
+ db_printf(" ring[%u] == hardware tail\n", idx);
+#endif
+
+ db_printf(" hw status:\n");
+ db_ccp_show_queue_hw(qp);
+}
+
+DB_SHOW_COMMAND(ccp, db_show_ccp)
+{
+ struct ccp_softc *sc;
+ unsigned unit, qindex;
+
+ if (!have_addr)
+ goto usage;
+
+ unit = (unsigned)addr;
+
+ sc = devclass_get_softc(ccp_devclass, unit);
+ if (sc == NULL) {
+ db_printf("No such device ccp%u\n", unit);
+ goto usage;
+ }
+
+ if (count == -1) {
+ db_show_ccp_sc(sc);
+ return;
+ }
+
+ qindex = (unsigned)count;
+ if (qindex >= nitems(sc->queues)) {
+ db_printf("No such queue %u\n", qindex);
+ goto usage;
+ }
+ db_show_ccp_qp(&sc->queues[qindex]);
+ return;
+
+usage:
+ db_printf("usage: show ccp <unit>[,<qindex>]\n");
+ return;
+}
+#endif /* DDB */
Index: sys/crypto/ccp/ccp_hardware.h
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp_hardware.h
@@ -0,0 +1,413 @@
+/*-
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define CMD_QUEUE_MASK_OFFSET 0x000
+#define CMD_QUEUE_PRIO_OFFSET 0x004
+#define CMD_REQID_CONFIG_OFFSET 0x008
+#define TRNG_OUT_OFFSET 0x00C
+#define CMD_CMD_TIMEOUT_OFFSET 0x010
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x018
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x01C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x020
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x024
+
+#define VERSION_REG 0x100
+#define VERSION_NUM_MASK 0x3F
+#define VERSION_CAP_MASK 0x7FC0
+#define VERSION_CAP_AES (1 << 6)
+#define VERSION_CAP_3DES (1 << 7)
+#define VERSION_CAP_SHA (1 << 8)
+#define VERSION_CAP_RSA (1 << 9)
+#define VERSION_CAP_ECC (1 << 10)
+#define VERSION_CAP_ZDE (1 << 11)
+#define VERSION_CAP_ZCE (1 << 12)
+#define VERSION_CAP_TRNG (1 << 13)
+#define VERSION_CAP_ELFC (1 << 14)
+#define VERSION_NUMVQM_SHIFT 15
+#define VERSION_NUMVQM_MASK 0xF
+#define VERSION_LSBSIZE_SHIFT 19
+#define VERSION_LSBSIZE_MASK 0x3FF
+
+#define CMD_Q_CONTROL_BASE 0x000
+#define CMD_Q_TAIL_LO_BASE 0x004
+#define CMD_Q_HEAD_LO_BASE 0x008
+#define CMD_Q_INT_ENABLE_BASE 0x00C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x010
+
+#define CMD_Q_STATUS_BASE 0x100
+#define CMD_Q_INT_STATUS_BASE 0x104
+
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Don't think there's much point in keeping these -- OS can't access: */
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* CMD_Q_CONTROL_BASE bits */
+#define CMD_Q_RUN (1 << 0)
+#define CMD_Q_HALTED (1 << 1)
+#define CMD_Q_MEM_LOCATION (1 << 2)
+#define CMD_Q_SIZE_SHIFT 3
+#define CMD_Q_SIZE_MASK 0x1F
+#define CMD_Q_PTR_HI_SHIFT 16
+#define CMD_Q_PTR_HI_MASK 0xFFFF
+
+/*
+ * The following bits are used for both CMD_Q_INT_ENABLE_BASE and
+ * CMD_Q_INTERRUPT_STATUS_BASE.
+ */
+#define INT_COMPLETION (1 << 0)
+#define INT_ERROR (1 << 1)
+#define INT_QUEUE_STOPPED (1 << 2)
+#define INT_QUEUE_EMPTY (1 << 3)
+#define ALL_INTERRUPTS (INT_COMPLETION | \
+ INT_ERROR | \
+ INT_QUEUE_STOPPED | \
+ INT_QUEUE_EMPTY)
+
+#define STATUS_ERROR_MASK 0x3F
+#define STATUS_JOBSTATUS_SHIFT 7
+#define STATUS_JOBSTATUS_MASK 0x7
+#define STATUS_ERRORSOURCE_SHIFT 10
+#define STATUS_ERRORSOURCE_MASK 0x3
+#define STATUS_VLSB_FAULTBLOCK_SHIFT 12
+#define STATUS_VLSB_FAULTBLOCK_MASK 0x7
+
+/* From JOBSTATUS field in STATUS register above */
+#define JOBSTATUS_IDLE 0
+#define JOBSTATUS_ACTIVE_WAITING 1
+#define JOBSTATUS_ACTIVE 2
+#define JOBSTATUS_WAIT_ABORT 3
+#define JOBSTATUS_DYN_ERROR 4
+#define JOBSTATUS_PREPARE_HALT 5
+
+/* From ERRORSOURCE field in STATUS register */
+#define ERRORSOURCE_INPUT_MEMORY 0
+#define ERRORSOURCE_CMD_DESCRIPTOR 1
+#define ERRORSOURCE_INPUT_DATA 2
+#define ERRORSOURCE_KEY_DATA 3
+
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+};
+
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL,
+};
+
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+};
+
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0,
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+};
+
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+};
+
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+};
+
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+};
+
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+};
+
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+};
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+
+struct ccp_desc {
+ union dword0 {
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t size:7;
+ uint32_t encrypt:1;
+ uint32_t mode:5;
+ uint32_t type:2;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_2:7;
+ } aes;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t size:7;
+ uint32_t encrypt:1;
+ uint32_t mode:5;
+ uint32_t type:2;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_2:7;
+ } des;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t size:7;
+ uint32_t encrypt:1;
+ uint32_t reserved_2:5;
+ uint32_t type:2;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_3:7;
+ } aes_xts;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t reserved_2:10;
+ uint32_t type:4;
+ uint32_t reserved_3:1;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_4:7;
+ } sha;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t mode:3;
+ uint32_t size:12;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_2:7;
+ } rsa;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t byteswap:2;
+ uint32_t bitwise:3;
+ uint32_t reflect:2;
+ uint32_t reserved_2:8;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_3:7;
+ } pt;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t reserved_2:13;
+ uint32_t reserved_3:2;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_4:7;
+ } zlib;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t size:10;
+ uint32_t type:2;
+ uint32_t mode:3;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_2:7;
+ } ecc;
+ struct {
+ uint32_t hoc:1; /* Halt on completion */
+ uint32_t ioc:1; /* Intr. on completion */
+ uint32_t reserved_1:1;
+ uint32_t som:1; /* Start of message */
+ uint32_t eom:1; /* End " */
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t reserved_2:7;
+ } /* generic */;
+ };
+
+ uint32_t length;
+ uint32_t src_lo;
+
+ struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_ctx_id:8;
+ uint32_t reserved_3:5;
+ uint32_t src_fixed:1;
+ };
+
+ union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+ };
+
+ union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t reserved_4:13;
+ uint32_t dst_fixed:1;
+ };
+ uint32_t sha_len_hi;
+ };
+
+ uint32_t key_lo;
+
+ struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t reserved_5:14;
+ };
+};
+
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+};
+
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
Index: sys/crypto/ccp/ccp_hardware.c
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp_hardware.c
@@ -0,0 +1,1888 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/sglist.h>
+#include <sys/sysctl.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/vmparam.h>
+
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "cryptodev_if.h"
+
+#include "ccp.h"
+#include "ccp_hardware.h"
+#include "ccp_lsb.h"
+
+CTASSERT(sizeof(struct ccp_desc) == 32);
+
+SYSCTL_NODE(_hw, OID_AUTO, ccp, CTLFLAG_RD, 0, "ccp node");
+
+unsigned g_ccp_ring_order = 11;
+SYSCTL_UINT(_hw_ccp, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ccp_ring_order,
+ 0, "Set CCP ring order. (1 << this) == ring size. Min: 1, Max: 16");
+
+static inline uint32_t
+ccp_read_4(struct ccp_softc *sc, uint32_t offset)
+{
+ return (bus_space_read_4(sc->pci_bus_tag, sc->pci_bus_handle, offset));
+}
+
+static inline void
+ccp_write_4(struct ccp_softc *sc, uint32_t offset, uint32_t value)
+{
+ bus_space_write_4(sc->pci_bus_tag, sc->pci_bus_handle, offset, value);
+}
+
+static inline uint32_t
+ccp_read_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset)
+{
+ /*
+ * Each queue gets its own 4kB register space. Queue 0 is at 0x1000.
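+	 * E.g., queue 2's CMD_Q_TAIL_LO register lives at
+	 * 0x1000 * (1 + 2) + 0x004 == 0x3004.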
+ */
+ return (ccp_read_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset));
+}
+
+static inline void
+ccp_write_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset,
+ uint32_t value)
+{
+ ccp_write_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset, value);
+}
+
+void
+ccp_queue_write_tail(struct ccp_queue *qp)
+{
+ ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE,
+ ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail));
+}
+
+/*
+ * Given a queue and a reserved LSB entry index, compute the LSB *entry id* of
+ * that entry for the queue's private LSB region.
+ */
+static inline uint8_t
+ccp_queue_lsb_entry(struct ccp_queue *qp, unsigned lsb_entry)
+{
+ return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry));
+}
+
+/*
+ * Given a queue and a reserved LSB entry index, compute the LSB *address* of
+ * that entry for the queue's private LSB region.
+ */
+static inline uint32_t
+ccp_queue_lsb_address(struct ccp_queue *qp, unsigned lsb_entry)
+{
+ return (ccp_queue_lsb_entry(qp, lsb_entry) * LSB_ENTRY_SIZE);
+}
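+
+/*
+ * E.g., assuming LSB_REGION_LENGTH is the 16 entries per region described in
+ * the notes below, entry 2 of queue-private region 3 becomes LSB entry id
+ * 3 * 16 + 2 == 50, which ccp_queue_lsb_address() then scales by the
+ * per-entry size (LSB_ENTRY_SIZE).
+ */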
+
+/*
+ * Some terminology:
+ *
+ * LSB - Local Storage Block
+ * =========================
+ *
+ * 8 segments/regions, each containing 16 entries.
+ *
+ * Each entry contains 256 bits (32 bytes).
+ *
+ * Segments are virtually addressed in commands, but accesses cannot cross
+ * segment boundaries. Virtual map uses an identity mapping by default
+ * (virtual segment N corresponds to physical segment N).
+ *
+ * Access to a physical region can be restricted to any subset of all five
+ * queues.
+ *
+ * "Pass-through" mode
+ * ===================
+ *
+ * Pass-through is a generic DMA engine, much like ioat(4). Some nice
+ * features:
+ *
+ * - Supports byte-swapping for endian conversion (32- or 256-bit words)
+ * - AND, OR, XOR with fixed 256-bit mask
+ * - CRC32 of data (may be used in tandem with bswap, but not bit operations)
+ * - Read/write of LSB
+ * - Memset
+ *
+ * If bit manipulation mode is enabled, input must be a multiple of 256 bits
+ * (32 bytes).
+ *
+ * If byte-swapping is enabled, input must be a multiple of the word size.
+ *
+ * Zlib mode -- only usable from one queue at a time, single job at a time.
+ * ========================================================================
+ *
+ * Only usable from private host, aka PSP? Not host processor?
+ *
+ * RNG.
+ * ====
+ *
+ * Raw bits are conditioned with AES and fed through CTR_DRBG. Output goes in
+ * a ring buffer readable by software.
+ *
+ * NIST SP 800-90B Repetition Count and Adaptive Proportion health checks are
+ * implemented on the raw input stream and may be enabled to verify min-entropy
+ * of 0.5 bits per bit.
+ */
+
+static void
+ccp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_addr_t *baddr;
+
+ KASSERT(error == 0, ("%s: error:%d", __func__, error));
+ baddr = arg;
+ *baddr = segs->ds_addr;
+}
+
+static int
+ccp_hw_attach_queue(device_t dev, uint64_t lsbmask, unsigned queue)
+{
+ struct ccp_softc *sc;
+ struct ccp_queue *qp;
+ void *desc;
+ size_t ringsz, num_descriptors;
+ int error;
+
+ desc = NULL;
+ sc = device_get_softc(dev);
+ qp = &sc->queues[queue];
+
+ /*
+ * Don't bother allocating a ring for queues the host isn't allowed to
+ * drive.
+ */
+ if ((sc->valid_queues & (1 << queue)) == 0)
+ return (0);
+
+ ccp_queue_decode_lsb_regions(sc, lsbmask, queue);
+
+ /* Ignore queues that do not have any LSB access. */
+ if (qp->lsb_mask == 0) {
+ device_printf(dev, "Ignoring queue %u with no LSB access\n",
+ queue);
+ sc->valid_queues &= ~(1 << queue);
+ return (0);
+ }
+
+ num_descriptors = 1 << sc->ring_size_order;
+ ringsz = sizeof(struct ccp_desc) * num_descriptors;
+
+ /*
+ * "Queue_Size" is order - 1.
+ *
+ * Queue must be aligned to 5+Queue_Size+1 == 5 + order bits.
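+	 *
+	 * E.g., the default order of 11 gives 2048 32-byte descriptors (a 64 kB
+	 * ring) and requires 1 << (5 + 11) == 64 kB alignment.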
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1 << (5 + sc->ring_size_order),
+#if defined(__i386__) && !defined(PAE)
+ 0, BUS_SPACE_MAXADDR,
+#else
+ (bus_addr_t)1 << 32, BUS_SPACE_MAXADDR_48BIT,
+#endif
+ BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1,
+ ringsz, 0, NULL, NULL, &qp->ring_desc_tag);
+ if (error != 0)
+ goto out;
+
+ error = bus_dmamem_alloc(qp->ring_desc_tag, &desc,
+ BUS_DMA_ZERO | BUS_DMA_WAITOK, &qp->ring_desc_map);
+ if (error != 0)
+ goto out;
+
+ error = bus_dmamap_load(qp->ring_desc_tag, qp->ring_desc_map, desc,
+ ringsz, ccp_dmamap_cb, &qp->desc_ring_bus_addr, BUS_DMA_WAITOK);
+ if (error != 0)
+ goto out;
+
+ qp->desc_ring = desc;
+ qp->completions_ring = malloc(num_descriptors *
+ sizeof(*qp->completions_ring), M_CCP, M_ZERO | M_WAITOK);
+
+ /* Zero control register; among other things, clears the RUN flag. */
+ qp->qcontrol = 0;
+ ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);
+ ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE, 0);
+
+ /* Clear any leftover interrupt status flags */
+ ccp_write_queue_4(sc, queue, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ qp->qcontrol |= (sc->ring_size_order - 1) << CMD_Q_SIZE_SHIFT;
+
+ ccp_write_queue_4(sc, queue, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)qp->desc_ring_bus_addr);
+ ccp_write_queue_4(sc, queue, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)qp->desc_ring_bus_addr);
+
+ /*
+ * Enable completion interrupts, as well as error or administrative
+ * halt interrupts. We don't use administrative halts, but they
+ * shouldn't trip unless we do, so it ought to be harmless. We also
+ * are not prepared to actually handle errors at this time.
+ */
+ ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE,
+ INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED);
+
+ qp->qcontrol |= (qp->desc_ring_bus_addr >> 32) << CMD_Q_PTR_HI_SHIFT;
+ qp->qcontrol |= CMD_Q_RUN;
+ ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);
+
+out:
+ if (error != 0) {
+ if (qp->desc_ring != NULL)
+ bus_dmamap_unload(qp->ring_desc_tag,
+ qp->ring_desc_map);
+ if (desc != NULL)
+ bus_dmamem_free(qp->ring_desc_tag, desc,
+ qp->ring_desc_map);
+ if (qp->ring_desc_tag != NULL)
+ bus_dma_tag_destroy(qp->ring_desc_tag);
+ }
+ return (error);
+}
+
+static void
+ccp_hw_detach_queue(device_t dev, unsigned queue)
+{
+ struct ccp_softc *sc;
+ struct ccp_queue *qp;
+
+ sc = device_get_softc(dev);
+ qp = &sc->queues[queue];
+
+ /*
+ * Don't bother allocating a ring for queues the host isn't allowed to
+ * drive.
+ */
+ if ((sc->valid_queues & (1 << queue)) == 0)
+ return;
+
+ free(qp->completions_ring, M_CCP);
+ bus_dmamap_unload(qp->ring_desc_tag, qp->ring_desc_map);
+ bus_dmamem_free(qp->ring_desc_tag, qp->desc_ring, qp->ring_desc_map);
+ bus_dma_tag_destroy(qp->ring_desc_tag);
+}
+
+static int
+ccp_map_pci_bar(device_t dev)
+{
+ struct ccp_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ sc->pci_resource_id = PCIR_BAR(2);
+ sc->pci_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->pci_resource_id, RF_ACTIVE);
+ if (sc->pci_resource == NULL) {
+ device_printf(dev, "unable to allocate pci resource\n");
+ return (ENODEV);
+ }
+
+ sc->pci_resource_id_msix = PCIR_BAR(5);
+ sc->pci_resource_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->pci_resource_id_msix, RF_ACTIVE);
+ if (sc->pci_resource_msix == NULL) {
+ device_printf(dev, "unable to allocate pci resource msix\n");
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id,
+ sc->pci_resource);
+ return (ENODEV);
+ }
+
+ sc->pci_bus_tag = rman_get_bustag(sc->pci_resource);
+ sc->pci_bus_handle = rman_get_bushandle(sc->pci_resource);
+ return (0);
+}
+
+static void
+ccp_unmap_pci_bar(device_t dev)
+{
+ struct ccp_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id_msix,
+ sc->pci_resource_msix);
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id,
+ sc->pci_resource);
+}
+
+static void
+ccp_intr_handle_error(struct ccp_queue *qp, const struct ccp_desc *desc)
+{
+ struct ccp_completion_ctx *cctx;
+ struct ccp_softc *sc;
+ uint32_t status, error, esource, faultblock;
+ unsigned q, idx;
+
+ sc = qp->cq_softc;
+ q = qp->cq_qindex;
+
+ status = ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE);
+
+ /* TODO: Decode error status from table on pg. 106 */
+ error = status & STATUS_ERROR_MASK;
+ esource = (status >> STATUS_ERRORSOURCE_SHIFT) &
+ STATUS_ERRORSOURCE_MASK;
+ faultblock = (status >> STATUS_VLSB_FAULTBLOCK_SHIFT) &
+ STATUS_VLSB_FAULTBLOCK_MASK;
+ device_printf(sc->dev, "Error: %u Source: %u Faulting LSB block: %u\n",
+ error, esource, faultblock);
+
+ /* TODO Could format the desc nicely here */
+ idx = desc - qp->desc_ring;
+ device_printf(sc->dev, "Bad descriptor index: %u contents: %32D\n",
+ idx, (const void *)desc, " ");
+
+ /*
+ * TODO Per § 14.4 "Error Handling," DMA_Status, DMA_Read/Write_Status,
+ * Zlib Decompress status may be interesting.
+ */
+
+ cctx = &qp->completions_ring[idx];
+ if (cctx->callback_fn != NULL) {
+ /* TODO More specific error code */
+ cctx->callback_fn(qp, cctx->session, cctx->callback_arg, EIO);
+ cctx->callback_fn = NULL;
+ }
+
+ /*
+ * Restart procedure described in § 14.2.5. Could be used by HoC if we
+ * used that.
+ *
+ * Advance HEAD_LO past bad descriptor manually, then restart queue.
+ */
+ idx = (idx + 1) % (1 << sc->ring_size_order);
+ qp->cq_head = idx;
+ device_printf(sc->dev, "%s: wrote sw head:%u\n", __func__,
+ qp->cq_head);
+ ccp_write_queue_4(sc, q, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)qp->desc_ring_bus_addr + (idx * Q_DESC_SIZE));
+ ccp_write_queue_4(sc, q, CMD_Q_CONTROL_BASE, qp->qcontrol);
+ device_printf(sc->dev, "%s: Restarted queue\n", __func__);
+}
+
+static void
+ccp_intr_run_completions(struct ccp_queue *qp, uint32_t ints)
+{
+ struct ccp_completion_ctx *cctx;
+ struct ccp_softc *sc;
+ const struct ccp_desc *desc;
+ uint32_t headlo, idx;
+ unsigned q;
+
+ sc = qp->cq_softc;
+ q = qp->cq_qindex;
+
+ mtx_lock(&qp->cq_lock);
+
+ /*
+ * Hardware HEAD_LO points to the first incomplete descriptor. Process
+ * any submitted and completed descriptors, up to but not including
+ * HEAD_LO.
+ */
+ headlo = ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE);
+ idx = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE;
+
+ device_printf(sc->dev, "%s: hw head:%u sw head:%u\n", __func__, idx,
+ qp->cq_head);
+ while (qp->cq_head != idx) {
+ device_printf(sc->dev, "%s: completing:%u\n", __func__,
+ qp->cq_head);
+
+ cctx = &qp->completions_ring[qp->cq_head];
+ if (cctx->callback_fn != NULL) {
+ cctx->callback_fn(qp, cctx->session,
+ cctx->callback_arg, 0);
+ cctx->callback_fn = NULL;
+ }
+
+ qp->cq_head = (qp->cq_head + 1) % (1 << sc->ring_size_order);
+ }
+
+ device_printf(sc->dev, "%s: wrote sw head:%u\n", __func__,
+ qp->cq_head);
+
+ /*
+ * Desc points to the first incomplete descriptor, at the time we read
+ * HEAD_LO. If there was an error flagged in interrupt status, the HW
+ * will not proceed past the erroneous descriptor by itself.
+ */
+ desc = &qp->desc_ring[idx];
+ if ((ints & INT_ERROR) != 0)
+ ccp_intr_handle_error(qp, desc);
+
+ mtx_unlock(&qp->cq_lock);
+}
+
+static void
+ccp_intr_handler(void *arg)
+{
+ struct ccp_softc *sc = arg;
+ size_t i;
+ uint32_t ints;
+
+ device_printf(sc->dev, "%s: interrupt\n", __func__);
+
+ /*
+ * We get one global interrupt per PCI device, shared over all of
+ * its queues. Scan each valid queue on interrupt for flags indicating
+ * activity.
+ */
+ for (i = 0; i < nitems(sc->queues); i++) {
+ if ((sc->valid_queues & (1 << i)) == 0)
+ continue;
+
+ ints = ccp_read_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE);
+ if (ints == 0)
+ continue;
+
+#if 0
+ device_printf(sc->dev, "%s: %x interrupts on queue %zu\n",
+ __func__, (unsigned)ints, i);
+#endif
+ /* Write back 1s to clear interrupt status bits. */
+ ccp_write_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE, ints);
+
+ if ((ints & INT_COMPLETION) != 0)
+ ccp_intr_run_completions(&sc->queues[i], ints);
+
+ if ((ints & INT_QUEUE_STOPPED) != 0)
+ device_printf(sc->dev, "%s: queue %zu stopped\n",
+ __func__, i);
+ }
+}
+
+static int
+ccp_setup_interrupts(struct ccp_softc *sc)
+{
+ int rid, error;
+#if 0
+ /* MSIX code */
+ uint32_t nvec;
+ int n;
+
+ n = pci_msix_count(sc->dev);
+ device_printf(sc->dev, "XXX %s: msix_count: %d\n", __func__, n);
+
+ if (n < 1) {
+ return (ENXIO);
+ }
+
+ nvec = n;
+ n = pci_alloc_msix(sc->dev, &nvec);
+ device_printf(sc->dev, "XXX %s: alloc_msix: %d nvec=%u\n",
+ __func__, n, nvec);
+
+ if (n != 0) {
+ return (n);
+ }
+ if (nvec < 1) {
+ return (ENXIO);
+ }
+
+ rid = 1;
+ sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+#else
+ /* INTx code */
+ rid = 0;
+ sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE | RF_SHAREABLE);
+#endif
+ if (sc->intr_res == NULL) {
+ device_printf(sc->dev, "%s: Failed to alloc IRQ resource\n",
+ __func__);
+ return (ENXIO);
+ }
+
+ sc->intr_tag = NULL;
+ error = bus_setup_intr(sc->dev, sc->intr_res,
+ INTR_MPSAFE | INTR_TYPE_MISC, NULL, ccp_intr_handler, sc,
+ &sc->intr_tag);
+ if (error != 0)
+ device_printf(sc->dev, "%s: setup_intr: %d\n", __func__, error);
+
+ return (error);
+}
+
+static void
+ccp_release_interrupts(struct ccp_softc *sc)
+{
+ if (sc->intr_tag != NULL)
+ bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
+ if (sc->intr_res != NULL)
+ bus_release_resource(sc->dev, SYS_RES_IRQ,
+ rman_get_rid(sc->intr_res), sc->intr_res);
+
+ pci_release_msi(sc->dev);
+}
+
+int
+ccp_hw_attach(device_t dev)
+{
+ struct ccp_softc *sc;
+ uint64_t lsbmask;
+ uint32_t version, lsbmasklo, lsbmaskhi;
+ unsigned i, j;
+ int error;
+ bool bars_mapped, interrupts_setup;
+
+ i = 0;
+ bars_mapped = interrupts_setup = false;
+ sc = device_get_softc(dev);
+
+ error = ccp_map_pci_bar(dev);
+ if (error != 0) {
+ device_printf(dev, "XXX%s: couldn't map BAR(s)\n", __func__);
+ goto out;
+ }
+ bars_mapped = true;
+
+ error = pci_enable_busmaster(dev);
+ if (error != 0) {
+ device_printf(dev, "XXX%s: couldn't enable busmaster\n",
+ __func__);
+ goto out;
+ }
+
+ sc->ring_size_order = g_ccp_ring_order;
+ sc->valid_queues = ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET);
+
+ version = ccp_read_4(sc, VERSION_REG);
+ if ((version & VERSION_NUM_MASK) < 5) {
+ device_printf(dev,
+ "driver supports version 5 and later hardware\n");
+ error = ENXIO;
+ goto out;
+ }
+
+ error = ccp_setup_interrupts(sc);
+ if (error != 0)
+ goto out;
+ interrupts_setup = true;
+
+ sc->hw_version = version & VERSION_NUM_MASK;
+ sc->num_queues = (version >> VERSION_NUMVQM_SHIFT) &
+ VERSION_NUMVQM_MASK;
+ sc->num_lsb_entries = (version >> VERSION_LSBSIZE_SHIFT) &
+ VERSION_LSBSIZE_MASK;
+ sc->hw_features = version & VERSION_CAP_MASK;
+
+ /*
+ * Copy private LSB mask to public registers to enable access to LSB
+ * from all queues allowed by BIOS.
+ */
+ lsbmasklo = ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET);
+ lsbmaskhi = ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET);
+ ccp_write_4(sc, LSB_PUBLIC_MASK_LO_OFFSET, lsbmasklo);
+ ccp_write_4(sc, LSB_PUBLIC_MASK_HI_OFFSET, lsbmaskhi);
+
+ lsbmask = ((uint64_t)lsbmaskhi << 30) | lsbmasklo;
+ device_printf(dev, "XXX%s: 2\n", __func__);
+
+ for (; i < nitems(sc->queues); i++) {
+ error = ccp_hw_attach_queue(dev, lsbmask, i);
+ if (error != 0) {
+ device_printf(dev, "XXX%s: couldn't attach queue %u\n",
+ __func__, i);
+ goto out;
+ }
+ }
+ ccp_assign_lsb_regions(sc, lsbmask);
+ device_printf(dev, "XXX%s: 3\n", __func__);
+
+out:
+ if (error != 0) {
+ if (interrupts_setup)
+ ccp_release_interrupts(sc);
+ for (j = 0; j < i; j++)
+ ccp_hw_detach_queue(dev, j);
+ if (sc->ring_size_order != 0)
+ pci_disable_busmaster(dev);
+ if (bars_mapped)
+ ccp_unmap_pci_bar(dev);
+ }
+ return (error);
+}
+
+void
+ccp_hw_detach(device_t dev)
+{
+ struct ccp_softc *sc;
+ unsigned i;
+
+ sc = device_get_softc(dev);
+
+ for (i = 0; i < nitems(sc->queues); i++)
+ ccp_hw_detach_queue(dev, i);
+
+ ccp_release_interrupts(sc);
+ pci_disable_busmaster(dev);
+ ccp_unmap_pci_bar(dev);
+}
+
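+/*
+ * Enqueue a single passthrough (DMA copy) descriptor on the queue. The
+ * descriptor is only written into the host ring here; the caller must later
+ * call ccp_queue_write_tail() to hand the queued descriptors to the
+ * hardware.
+ */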
+static void
+ccp_passthrough(struct ccp_queue *qp, bus_addr_t dst,
+ enum ccp_memtype dst_type, bus_addr_t src, enum ccp_memtype src_type,
+ bus_size_t len, enum ccp_passthru_byteswap swapmode,
+ enum ccp_passthru_bitwise bitmode, bool interrupt,
+ const struct ccp_completion_ctx *cctx)
+{
+ struct ccp_desc *desc;
+
+ KASSERT(ccp_queue_get_ring_space(qp) > 0,
+ ("ccp_passthrough on full ring"));
+
+ desc = &qp->desc_ring[qp->cq_tail];
+
+ memset(desc, 0, sizeof(*desc));
+ desc->engine = CCP_ENGINE_PASSTHRU;
+
+ desc->pt.ioc = interrupt;
+ desc->pt.byteswap = swapmode;
+ desc->pt.bitwise = bitmode;
+ desc->length = len;
+
+ desc->src_lo = (uint32_t)src;
+ desc->src_hi = src >> 32;
+ desc->src_mem = src_type;
+
+ desc->dst_lo = (uint32_t)dst;
+ desc->dst_hi = dst >> 32;
+ desc->dst_mem = dst_type;
+
+ if (bitmode != CCP_PASSTHRU_BITWISE_NOOP)
+ desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_KEY);
+
+ if (cctx != NULL)
+ memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx));
+
+ qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order);
+}
+
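+/*
+ * Copy between an LSB entry and a scatter/gather list, using one
+ * passthrough descriptor per segment. Only the descriptor covering the
+ * final bytes of the transfer may request an interrupt on completion.
+ */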
+static void
+ccp_passthrough_sgl(struct ccp_queue *qp, bus_addr_t lsb_addr, bool tolsb,
+ struct sglist *sgl, bus_size_t len, bool interrupt,
+ const struct ccp_completion_ctx *cctx)
+{
+ struct sglist_seg *seg;
+ size_t i, remain, nb;
+
+ remain = len;
+ for (i = 0; i < sgl->sg_nseg && remain != 0; i++) {
+ seg = &sgl->sg_segs[i];
+ nb = min(remain, seg->ss_len);
+
+ if (tolsb)
+ ccp_passthrough(qp, lsb_addr, CCP_MEMTYPE_SB,
+ seg->ss_paddr, CCP_MEMTYPE_SYSTEM, nb,
+ CCP_PASSTHRU_BYTESWAP_NOOP,
+ CCP_PASSTHRU_BITWISE_NOOP,
+ (nb == remain) && interrupt, cctx);
+ else
+ ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM,
+ lsb_addr, CCP_MEMTYPE_SB, nb,
+ CCP_PASSTHRU_BYTESWAP_NOOP,
+ CCP_PASSTHRU_BITWISE_NOOP,
+ (nb == remain) && interrupt, cctx);
+
+ remain -= nb;
+ }
+}
+
+/*
+ * Note that these initial hash vectors are listed in reverse of the usual
+ * word order (SHA1_H is zero-padded out to a full LSB entry).
+ */
+const uint32_t SHA1_H[] = {
+ 0xc3d2e1f0ul,
+ 0x10325476ul,
+ 0x98badcfeul,
+ 0xefcdab89ul,
+ 0x67452301ul,
+ 0,
+ 0,
+ 0,
+};
+
+const uint32_t SHA224_H[] = {
+ 0xbefa4fa4ul,
+ 0x64f98fa7ul,
+ 0x68581511ul,
+ 0xffc00b31ul,
+ 0xf70e5939ul,
+ 0x3070dd17ul,
+ 0x367cd507ul,
+ 0xc1059ed8ul,
+};
+
+const uint32_t SHA256_H[] = {
+ 0x5be0cd19ul,
+ 0x1f83d9abul,
+ 0x9b05688cul,
+ 0x510e527ful,
+ 0xa54ff53aul,
+ 0x3c6ef372ul,
+ 0xbb67ae85ul,
+ 0x6a09e667ul,
+};
+
+const uint64_t SHA384_H[] = {
+ 0x47b5481dbefa4fa4ull,
+ 0xdb0c2e0d64f98fa7ull,
+ 0x8eb44a8768581511ull,
+ 0x67332667ffc00b31ull,
+ 0x152fecd8f70e5939ull,
+ 0x9159015a3070dd17ull,
+ 0x629a292a367cd507ull,
+ 0xcbbb9d5dc1059ed8ull,
+};
+
+const uint64_t SHA512_H[] = {
+ 0x5be0cd19137e2179ull,
+ 0x1f83d9abfb41bd6bull,
+ 0x9b05688c2b3e6c1full,
+ 0x510e527fade682d1ull,
+ 0xa54ff53a5f1d36f1ull,
+ 0x3c6ef372fe94f82bull,
+ 0xbb67ae8584caa73bull,
+ 0x6a09e667f3bcc908ull,
+};
+
+const struct SHA_Defn {
+ enum sha_version version;
+ const void *H_vectors;
+ size_t H_size;
+ struct auth_hash *axf;
+ enum ccp_sha_type engine_type;
+} SHA_definitions[] = {
+ {
+ .version = SHA1,
+ .H_vectors = SHA1_H,
+ .H_size = sizeof(SHA1_H),
+ .axf = &auth_hash_hmac_sha1,
+ .engine_type = CCP_SHA_TYPE_1,
+ },
+#if 0
+ {
+ .version = SHA2_224,
+ .H_vectors = SHA224_H,
+ .H_size = sizeof(SHA224_H),
+ .axf = &auth_hash_hmac_sha2_224,
+ .engine_type = CCP_SHA_TYPE_224,
+ },
+#endif
+ {
+ .version = SHA2_256,
+ .H_vectors = SHA256_H,
+ .H_size = sizeof(SHA256_H),
+ .axf = &auth_hash_hmac_sha2_256,
+ .engine_type = CCP_SHA_TYPE_256,
+ },
+ {
+ .version = SHA2_384,
+ .H_vectors = SHA384_H,
+ .H_size = sizeof(SHA384_H),
+ .axf = &auth_hash_hmac_sha2_384,
+ .engine_type = CCP_SHA_TYPE_384,
+ },
+ {
+ .version = SHA2_512,
+ .H_vectors = SHA512_H,
+ .H_size = sizeof(SHA512_H),
+ .axf = &auth_hash_hmac_sha2_512,
+ .engine_type = CCP_SHA_TYPE_512,
+ },
+};
+
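+/*
+ * Enqueue one SHA-engine descriptor covering a single contiguous chunk of
+ * the message. 'start' and 'end' set the start-of-message and
+ * end-of-message flags; the final descriptor also carries the total message
+ * length in bits.
+ */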
+static void
+ccp_sha_single_desc(struct ccp_queue *qp, const struct SHA_Defn *defn,
+ vm_paddr_t addr, size_t len, bool start, bool end, uint64_t msgbits)
+{
+ struct ccp_desc *desc;
+
+ KASSERT(ccp_queue_get_ring_space(qp) > 0,
+ ("ccp_passthrough on full ring"));
+
+ desc = &qp->desc_ring[qp->cq_tail];
+
+ memset(desc, 0, sizeof(*desc));
+ desc->engine = CCP_ENGINE_SHA;
+ desc->som = start;
+ desc->eom = end;
+
+ desc->sha.type = defn->engine_type;
+ desc->length = len;
+
+ if (end) {
+ desc->sha_len_lo = (uint32_t)msgbits;
+ desc->sha_len_hi = msgbits >> 32;
+ }
+
+ desc->src_lo = (uint32_t)addr;
+ desc->src_hi = addr >> 32;
+ desc->src_mem = CCP_MEMTYPE_SYSTEM;
+
+ desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_SHA);
+
+ qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order);
+}
+
+static int
+ccp_sha(struct ccp_queue *qp, enum sha_version version, struct sglist *sgl_src,
+ struct sglist *sgl_dst, const struct ccp_completion_ctx *cctx, int mflags)
+{
+ const struct SHA_Defn *defn;
+ struct sglist_seg *seg;
+ size_t i, msgsize, remaining, nb;
+ uint32_t lsbaddr;
+
+ for (i = 0; i < nitems(SHA_definitions); i++)
+ if (SHA_definitions[i].version == version)
+ break;
+ if (i == nitems(SHA_definitions))
+ return (EINVAL);
+ defn = &SHA_definitions[i];
+
+ /* XXX validate input ??? */
+
+ /* Load initial SHA state into LSB */
+ /* XXX ensure H_vectors don't span page boundaries */
+ ccp_passthrough(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_SHA),
+ CCP_MEMTYPE_SB, pmap_kextract((vm_offset_t)defn->H_vectors),
+ CCP_MEMTYPE_SYSTEM, roundup2(defn->H_size, LSB_ENTRY_SIZE),
+ CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, false,
+ NULL);
+
+ /* Execute series of SHA updates on correctly sized buffers */
+ msgsize = 0;
+ for (i = 0; i < sgl_src->sg_nseg; i++) {
+ seg = &sgl_src->sg_segs[i];
+ msgsize += seg->ss_len;
+ ccp_sha_single_desc(qp, defn, seg->ss_paddr, seg->ss_len,
+ i == 0, i == sgl_src->sg_nseg - 1, msgsize << 3);
+ }
+
+ /* Copy result out to sgl_dst */
+ remaining = roundup2(defn->H_size, LSB_ENTRY_SIZE);
+ lsbaddr = ccp_queue_lsb_address(qp, LSB_ENTRY_SHA);
+ for (i = 0; i < sgl_dst->sg_nseg; i++) {
+ seg = &sgl_dst->sg_segs[i];
+ nb = min(remaining, seg->ss_len);
+
+ ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM, lsbaddr,
+ CCP_MEMTYPE_SB, nb, CCP_PASSTHRU_BYTESWAP_NOOP,
+ CCP_PASSTHRU_BITWISE_NOOP,
+ (cctx != NULL) ? (nb == remaining) : false,
+ (nb == remaining) ? cctx : NULL);
+
+ remaining -= nb;
+ lsbaddr += nb;
+ if (remaining == 0)
+ break;
+ }
+
+ return (0);
+}
+
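+/*
+ * Reverse a 256-bit (32-byte) buffer in place by exchanging and
+ * byte-swapping its four 64-bit words.
+ */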
+static void
+byteswap256(uint64_t *buffer)
+{
+ uint64_t t;
+
+ t = bswap64(buffer[3]);
+ buffer[3] = bswap64(buffer[0]);
+ buffer[0] = t;
+
+ t = bswap64(buffer[2]);
+ buffer[2] = bswap64(buffer[1]);
+ buffer[1] = t;
+}
+
+/*
+ * Translate the CCP's internal LSB hash format into a standard hash output.
+ *
+ * Modifies the input buffer in place via byteswap256().
+ */
+static void
+ccp_sha_copy_result(char *output, char *buffer, enum sha_version version)
+{
+ const struct SHA_Defn *defn;
+ size_t i;
+
+ for (i = 0; i < nitems(SHA_definitions); i++)
+ if (SHA_definitions[i].version == version)
+ break;
+ if (i == nitems(SHA_definitions))
+ panic("bogus sha version auth_mode %u\n", (unsigned)version);
+
+ defn = &SHA_definitions[i];
+
+ /* Swap 256bit manually -- DMA engine can, but with limitations */
+ byteswap256((void *)buffer);
+ if (defn->axf->hashsize > LSB_ENTRY_SIZE)
+ byteswap256((void *)(buffer + LSB_ENTRY_SIZE));
+
+ switch (defn->version) {
+ case SHA1:
+ memcpy(output, buffer + 12, defn->axf->hashsize);
+ break;
+#if 0
+ case SHA2_224:
+ memcpy(output, buffer + XXX, defn->axf->hashsize);
+ break;
+#endif
+ case SHA2_256:
+ memcpy(output, buffer, defn->axf->hashsize);
+ break;
+ case SHA2_384:
+ memcpy(output,
+ buffer + LSB_ENTRY_SIZE * 3 - defn->axf->hashsize,
+ defn->axf->hashsize - LSB_ENTRY_SIZE);
+ memcpy(output + defn->axf->hashsize - LSB_ENTRY_SIZE, buffer,
+ LSB_ENTRY_SIZE);
+ break;
+ case SHA2_512:
+ memcpy(output, buffer + LSB_ENTRY_SIZE, LSB_ENTRY_SIZE);
+ memcpy(output + LSB_ENTRY_SIZE, buffer, LSB_ENTRY_SIZE);
+ break;
+ }
+}
+
+void
+XXX_ccp_test(struct ccp_softc *sc)
+{
+ const struct SHA_Defn *defn;
+ uint64_t var1, var2;
+ uint32_t res[32] __aligned(128) = { 0 };
+ struct ccp_queue *qp;
+ unsigned q;
+ int error;
+ const char *msg = "a";
+
+ device_printf(sc->dev, "%s enter\n", __func__);
+
+ var1 = 0;
+ var2 = 0xdeadbeef;
+ device_printf(sc->dev, "%s var1=%lx var2=%lx\n", __func__, var1, var2);
+
+ for (q = 0; q < nitems(sc->queues); q++)
+ if ((sc->valid_queues & (1 << q)) != 0)
+ break;
+ if (q == nitems(sc->queues)) {
+ device_printf(sc->dev, "%s: no valid queues\n", __func__);
+ return;
+ }
+ qp = &sc->queues[q];
+
+ struct sglist_seg sgl_segs[] = {
+ {
+ .ss_paddr = pmap_kextract((vm_offset_t)msg),
+ .ss_len = 1,
+ },
+ };
+ struct sglist sgl = {
+ .sg_segs = sgl_segs,
+ .sg_nseg = 1,
+ .sg_maxseg = 1,
+ .sg_refs = 1,
+ };
+
+ struct sglist_seg sgl2_segs[] = {
+ {
+ .ss_paddr = pmap_kextract((vm_offset_t)res),
+ .ss_len = sizeof(res),
+ },
+ };
+ struct sglist sgl_dst = {
+ .sg_segs = sgl2_segs,
+ .sg_nseg = 1,
+ .sg_maxseg = 1,
+ .sg_refs = 1,
+ };
+
+ device_printf(sc->dev, "%s ccp_sha\n", __func__);
+ error = ccp_sha(qp, SHA2_384, &sgl, &sgl_dst, NULL, M_WAITOK);
+ if (error != 0) {
+ device_printf(sc->dev, "%s: ccp_sha error: %d\n", __func__,
+ error);
+ return;
+ }
+
+ wmb();
+ device_printf(sc->dev, "%s sending to HW\n", __func__);
+ ccp_queue_write_tail(qp);
+
+ device_printf(sc->dev, "%s sleeping\n", __func__);
+ DELAY(500 * 1000);
+
+ device_printf(sc->dev, "%s hardware head is 0x%x (base=0x%x)\n",
+ __func__, ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE),
+ (uint32_t)qp->desc_ring_bus_addr);
+ device_printf(sc->dev, "%s qcontrol=0x%x\n", __func__,
+ ccp_read_queue_4(sc, q, CMD_Q_CONTROL_BASE));
+
+ device_printf(sc->dev, "%s sha1 res=%20D\n", __func__,
+ (u_char*)&res[3], " ");
+ device_printf(sc->dev, "%s sha256 res=%32D\n", __func__,
+ (u_char*)res, " ");
+ device_printf(sc->dev, "%s sha384 res=%16D %32D\n", __func__,
+ (u_char*)&res[12], " ", (u_char*)res, " ");
+ device_printf(sc->dev, "%s sha512 res=%32D %32D\n", __func__,
+ (u_char*)&res[8], " ", (u_char*)res, " ");
+
+ device_printf(sc->dev, "%s leave\n", __func__);
+
+ /* XXX Below: basic single-descriptor SHA test */
+ return;
+
+ /* Load initial SHA state into LSB */
+ defn = &SHA_definitions[0];
+
+ device_printf(sc->dev, "%s writing PST desc to load hash init values\n", __func__);
+ ccp_passthrough(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_SHA),
+ CCP_MEMTYPE_SB, pmap_kextract((vm_offset_t)defn->H_vectors),
+ CCP_MEMTYPE_SYSTEM, roundup2(defn->H_size, LSB_ENTRY_SIZE),
+ CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, false,
+ NULL);
+
+ /* Queue single SHA desc of empty vector. */
+ device_printf(sc->dev, "%s writing SHA desc\n", __func__);
+ ccp_sha_single_desc(qp, defn, pmap_kextract((vm_offset_t)msg), 1, true, true, 8);
+
+ /* Copy result out */
+ device_printf(sc->dev, "%s writing PST desc to fetch result\n", __func__);
+ ccp_passthrough(qp, pmap_kextract((vm_offset_t)res),
+ CCP_MEMTYPE_SYSTEM, ccp_queue_lsb_address(qp, LSB_ENTRY_SHA),
+ CCP_MEMTYPE_SB, sizeof(res), CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BITWISE_NOOP, false, NULL);
+
+ wmb();
+ device_printf(sc->dev, "%s sending to HW\n", __func__);
+ ccp_queue_write_tail(qp);
+
+ /* XXX Below: basic PST test */
+ return;
+
+ ccp_passthrough(qp, pmap_kextract((vm_offset_t)&var1),
+ CCP_MEMTYPE_SYSTEM, pmap_kextract((vm_offset_t)&var2),
+ CCP_MEMTYPE_SYSTEM, sizeof(var1), CCP_PASSTHRU_BYTESWAP_NOOP,
+ CCP_PASSTHRU_BITWISE_NOOP, false, NULL);
+
+ device_printf(sc->dev, "%s incrementing tail\n", __func__);
+ wmb();
+ ccp_queue_write_tail(qp);
+ device_printf(sc->dev, "%s tail incremented; writing control word RUN & sleeping 0.5s\n", __func__);
+
+ DELAY(500 * 1000);
+
+ device_printf(sc->dev, "%s var1=%lx var2=%lx\n", __func__, var1, var2);
+}
+
+static void
+ccp_do_hmac_done(struct ccp_queue *qp, struct ccp_session *s,
+ struct cryptop *crp, struct cryptodesc *crd, int error)
+{
+ char ihash[SHA2_512_HASH_LEN /* max hash len */];
+ union authctx auth_ctx;
+ struct auth_hash *axf;
+
+ axf = s->hmac.auth_hash;
+
+ s->pending--;
+
+ if (error != 0) {
+ crp->crp_etype = error;
+ goto out;
+ }
+
+ /* Do remaining outer hash over small inner hash in software */
+ axf->Init(&auth_ctx);
+ axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
+ ccp_sha_copy_result(ihash, s->hmac.ipad, s->hmac.auth_mode);
+#if 0
+ device_printf(dev, "%s sha intermediate=%64D\n", __func__,
+ (u_char *)ihash, " ");
+#endif
+ axf->Update(&auth_ctx, ihash, axf->hashsize);
+ axf->Final(s->hmac.ipad, &auth_ctx);
+
+ crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
+ s->hmac.hash_len, s->hmac.ipad);
+
+ /* Avoid leaking key material */
+ explicit_bzero(&auth_ctx, sizeof(auth_ctx));
+ explicit_bzero(s->hmac.ipad, sizeof(s->hmac.ipad));
+ explicit_bzero(s->hmac.opad, sizeof(s->hmac.opad));
+
+out:
+ crypto_done(crp);
+}
+
+static void
+ccp_hmac_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
+ int error)
+{
+ struct cryptodesc *crd;
+ struct cryptop *crp;
+
+ crp = vcrp;
+ crd = crp->crp_desc;
+ ccp_do_hmac_done(qp, s, crp, crd, error);
+}
+
+static int
+ccp_do_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
+ struct cryptodesc *crd, const struct ccp_completion_ctx *cctx)
+{
+ device_t dev;
+ struct auth_hash *axf;
+ int error, sgl_nsegs;
+
+ dev = qp->cq_softc->dev;
+ axf = s->hmac.auth_hash;
+
+ /*
+ * Populate the SGL describing the inner hash contents. We want to hash
+ * the ipad (key XOR fixed bit pattern) concatenated with the user
+ * data.
+ */
+ sglist_reset(qp->cq_sg_ulptx);
+ error = sglist_append(qp->cq_sg_ulptx, s->hmac.ipad, axf->blocksize);
+ if (error != 0)
+ return (error);
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crd->crd_skip, crd->crd_len);
+ if (error != 0) {
+ device_printf(dev, "%s: sglist too short\n", __func__);
+ return (error);
+ }
+ /* Populate SGL for output -- just reuse hmac.ipad buffer. */
+ sglist_reset(qp->cq_sg_dst);
+ error = sglist_append(qp->cq_sg_dst, s->hmac.ipad,
+ roundup2(axf->hashsize, LSB_ENTRY_SIZE));
+ if (error != 0)
+ return (error);
+
+ /* XXX Determine # of ops required here and ensure we have enough. */
+ sgl_nsegs = qp->cq_sg_ulptx->sg_nseg;
+ //sgl_len = ccp_ulptx_sgl_len(sgl_nsegs);
+
+ error = ccp_sha(qp, s->hmac.auth_mode, qp->cq_sg_ulptx, qp->cq_sg_dst,
+ cctx, M_NOWAIT);
+ if (error != 0) {
+ device_printf(dev, "%s: ccp_sha error\n", __func__);
+ return (error);
+ }
+
+ if (cctx != NULL) {
+ wmb();
+ ccp_queue_write_tail(qp);
+ }
+ return (0);
+}
+
+int
+ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
+{
+ struct ccp_completion_ctx ctx;
+ struct cryptodesc *crd;
+
+ crd = crp->crp_desc;
+
+ ctx.callback_fn = ccp_hmac_done;
+ ctx.callback_arg = crp;
+ ctx.session = s;
+
+ return (ccp_do_hmac(qp, s, crp, crd, &ctx));
+}
+
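+/* Reverse the byte order of an arbitrary-length buffer in place. */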
+static void
+ccp_byteswap(char *data, size_t len)
+{
+ size_t i;
+ char t;
+
+ len--;
+ for (i = 0; i < len; i++, len--) {
+ t = data[i];
+ data[i] = data[len];
+ data[len] = t;
+ }
+}
+
+static void
+ccp_blkcipher_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
+ int error)
+{
+ struct cryptop *crp;
+
+ /* TODO: zero out sensitive fields */
+
+ crp = vcrp;
+
+ s->pending--;
+
+ if (error != 0)
+ crp->crp_etype = error;
+
+ device_printf(qp->cq_softc->dev, "XXX %s: qp=%p crp=%p\n", __func__,
+ qp, crp);
+ crypto_done(crp);
+ return;
+}
+
+static int
+ccp_do_blkcipher(struct ccp_queue *qp, struct ccp_session *s,
+ struct cryptop *crp, struct cryptodesc *crd,
+ const struct ccp_completion_ctx *cctx)
+{
+ struct ccp_desc *desc;
+ char *keydata;
+ device_t dev;
+ enum ccp_cipher_dir dir;
+ int sgl_nsegs, error;
+ size_t keydata_len;
+ unsigned i;
+
+ dev = qp->cq_softc->dev;
+
+ if (s->blkcipher.key_len == 0 || crd->crd_len == 0) {
+ device_printf(dev, "%s: empty\n", __func__);
+ return (EINVAL);
+ }
+ if ((crd->crd_len % AES_BLOCK_LEN) != 0) {
+ device_printf(dev, "%s: len modulo: %d\n", __func__,
+ crd->crd_len);
+ return (EINVAL);
+ }
+
+ /*
+ * Individual segments must be multiples of the AES block size for the
+ * hardware to process them. Non-compliant inputs aren't invalid, just
+ * not something this hardware can handle.
+ */
+ for (i = 0; i < qp->cq_sg_crp->sg_nseg; i++)
+ if ((qp->cq_sg_crp->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) {
+ device_printf(dev, "%s: seg modulo: %zu\n", __func__,
+ qp->cq_sg_crp->sg_segs[i].ss_len);
+ return (EINVAL);
+ }
+
+ /* Gather IV/nonce data */
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ dir = CCP_CIPHER_DIR_ENCRYPT;
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(s->blkcipher.iv, crd->crd_iv,
+ s->blkcipher.iv_len);
+ else
+ arc4rand(s->blkcipher.iv, s->blkcipher.iv_len, 0);
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, s->blkcipher.iv_len,
+ s->blkcipher.iv);
+ } else {
+ dir = CCP_CIPHER_DIR_DECRYPT;
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(s->blkcipher.iv, crd->crd_iv,
+ s->blkcipher.iv_len);
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, s->blkcipher.iv_len,
+ s->blkcipher.iv);
+ }
+
+ /* Reverse order of IV material for HW */
+ device_printf(dev, "YYY %s: IV: %16D len: %u\n", __func__,
+ s->blkcipher.iv, " ", s->blkcipher.iv_len);
+ ccp_byteswap(s->blkcipher.iv, s->blkcipher.iv_len);
+
+ /* Set up passthrough op(s) to copy IV into LSB */
+ sglist_reset(qp->cq_sg_ulptx);
+ error = sglist_append(qp->cq_sg_ulptx, s->blkcipher.iv,
+ s->blkcipher.iv_len);
+ if (error != 0)
+ return (error);
+
+ device_printf(dev, "XXX %s: starting IV pst @ %u\n", __func__,
+ qp->cq_tail);
+ ccp_passthrough_sgl(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV), true,
+ qp->cq_sg_ulptx, s->blkcipher.iv_len, false, NULL);
+
+ switch (crd->crd_alg) {
+ case CRYPTO_AES_CBC:
+ keydata = s->blkcipher.enckey;
+ keydata_len = s->blkcipher.key_len;
+ break;
+ case CRYPTO_AES_ICM:
+ keydata = s->blkcipher.enckey;
+ keydata_len = s->blkcipher.key_len;
+ break;
+ /* XXX deal with XTS */
+#if 0
+ case CRYPTO_AES_XTS:
+ key_half = s->blkcipher.key_len / 2;
+ memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
+ key_half);
+ if (crd->crd_flags & CRD_F_ENCRYPT)
+ memcpy(crwr->key_ctx.key + key_half,
+ s->blkcipher.enckey, key_half);
+ else
+ memcpy(crwr->key_ctx.key + key_half,
+ s->blkcipher.deckey, key_half);
+ break;
+#endif
+ }
+
+ /* Reverse order of key material for HW */
+ ccp_byteswap(keydata, keydata_len);
+
+ /* Store key material into LSB to avoid page boundaries */
+ sglist_reset(qp->cq_sg_ulptx);
+ error = sglist_append(qp->cq_sg_ulptx, keydata, keydata_len);
+ if (error != 0)
+ return (error);
+ device_printf(dev, "XXX %s: starting KEY pst @ %u\n", __func__,
+ qp->cq_tail);
+ ccp_passthrough_sgl(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), true,
+ qp->cq_sg_ulptx, keydata_len, false, NULL);
+
+ /*
+ * Point SGLs at the subset of cryptop buffer contents representing the
+ * data.
+ */
+ sglist_reset(qp->cq_sg_ulptx);
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crd->crd_skip, crd->crd_len);
+ if (error != 0)
+ return (error);
+
+ sgl_nsegs = qp->cq_sg_ulptx->sg_nseg;
+ //sgl_len = ccp_ulptx_sgl_len(sgl_nsegs);
+
+ device_printf(dev, "YYY %s: Contents: %16D\n", __func__,
+ (void *)PHYS_TO_DMAP(qp->cq_sg_ulptx->sg_segs[0].ss_paddr), " ");
+
+ device_printf(dev, "XXX %s: starting AES ops @ %u\n", __func__,
+ qp->cq_tail);
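+ /*
+ * Queue one AES descriptor per segment; each segment is crypted in
+ * place, with the key and IV context taken from the queue's LSB
+ * entries loaded above.
+ */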
+ for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
+ struct sglist_seg *seg;
+
+ seg = &qp->cq_sg_ulptx->sg_segs[i];
+
+ desc = &qp->desc_ring[qp->cq_tail];
+ desc->engine = CCP_ENGINE_AES;
+ desc->som = (i == 0);
+ desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1);
+ desc->ioc = (desc->eom && cctx != NULL);
+ device_printf(dev, "XXX %s: AES %u: som:%d eom:%d ioc:%d dir:%d\n", __func__,
+ qp->cq_tail, (int)desc->som, (int)desc->eom, (int)desc->ioc, (int)dir);
+
+ if (desc->ioc)
+ memcpy(&qp->completions_ring[qp->cq_tail], cctx,
+ sizeof(*cctx));
+
+ desc->aes.encrypt = dir;
+ desc->aes.mode = s->blkcipher.cipher_mode;
+ desc->aes.type = s->blkcipher.cipher_type;
+ if (crd->crd_alg == CRYPTO_AES_ICM)
+ /*
+ * Size of the CTR value in bits, minus one. Hardcode
+ * 32 bits for now.
+ */
+ desc->aes.size = 0x1f;
+
+ device_printf(dev, "XXX %s: AES %u: mode:%u type:%u size:%u\n", __func__,
+ qp->cq_tail, (unsigned)desc->aes.mode, (unsigned)desc->aes.type, (unsigned)desc->aes.size);
+
+ desc->length = seg->ss_len;
+ desc->src_lo = (uint32_t)seg->ss_paddr;
+ desc->src_hi = (seg->ss_paddr >> 32);
+ desc->src_mem = CCP_MEMTYPE_SYSTEM;
+
+ /* Crypt in-place */
+ desc->dst_lo = desc->src_lo;
+ desc->dst_hi = desc->src_hi;
+ desc->dst_mem = desc->src_mem;
+
+ desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
+ desc->key_hi = 0;
+ desc->key_mem = CCP_MEMTYPE_SB;
+
+ desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
+
+ qp->cq_tail = (qp->cq_tail + 1) %
+ (1 << qp->cq_softc->ring_size_order);
+ }
+ if (cctx != NULL) {
+ wmb();
+ ccp_queue_write_tail(qp);
+ }
+ return (0);
+}
+
+int
+ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
+{
+ struct ccp_completion_ctx ctx;
+ struct cryptodesc *crd;
+
+ /* XXX Determine # of ops required here and ensure we have enough. */
+
+ crd = crp->crp_desc;
+
+ ctx.callback_fn = ccp_blkcipher_done;
+ ctx.session = s;
+ ctx.callback_arg = crp;
+
+ return (ccp_do_blkcipher(qp, s, crp, crd, &ctx));
+}
+
+static void
+ccp_authenc_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
+ int error)
+{
+ struct cryptodesc *crda;
+ struct cryptop *crp;
+
+ /* TODO: zero out sensitive blkcipher fields */
+
+ crp = vcrp;
+ if (s->cipher_first)
+ crda = crp->crp_desc->crd_next;
+ else
+ crda = crp->crp_desc;
+
+ ccp_do_hmac_done(qp, s, crp, crda, error);
+}
+
+int
+ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
+ struct cryptodesc *crda, struct cryptodesc *crde)
+{
+ struct ccp_completion_ctx ctx;
+ int error;
+
+ ctx.callback_fn = ccp_authenc_done;
+ ctx.session = s;
+ ctx.callback_arg = crp;
+
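+ /*
+ * The two operations are queued back to back on the same ring; only
+ * the second carries the completion context, and since descriptors
+ * complete in order its callback fires once both have finished.
+ */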
+ /* Perform first operation */
+ if (crp->crp_desc == crde) {
+ s->cipher_first = true;
+ error = ccp_do_blkcipher(qp, s, crp, crde, NULL);
+ } else {
+ s->cipher_first = false;
+ error = ccp_do_hmac(qp, s, crp, crda, NULL);
+ }
+ if (error != 0)
+ return (error);
+
+ /* Perform second operation */
+ if (crp->crp_desc == crde)
+ error = ccp_do_hmac(qp, s, crp, crda, &ctx);
+ else
+ error = ccp_do_blkcipher(qp, s, crp, crde, &ctx);
+ return (error);
+}
+
+int
+ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
+ struct cryptodesc *crda, struct cryptodesc *crde)
+{
+ device_printf(qp->cq_softc->dev, "%s not supported\n", __func__);
+#if 0
+ char iv[CHCR_MAX_CRYPTO_IV_LEN];
+ struct chcr_wr *crwr;
+ struct wrqe *wr;
+ char *dst;
+ u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len;
+ u_int hash_size_in_response, imm_len;
+ u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
+ u_int hmac_ctrl, input_len;
+ int dsgl_nsegs, dsgl_len;
+ int sgl_nsegs, sgl_len;
+ int error;
+
+ if (s->blkcipher.key_len == 0)
+ return (EINVAL);
+
+ /*
+ * AAD is only permitted before the cipher/plain text, not
+ * after.
+ */
+ if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
+ return (EINVAL);
+
+ hash_size_in_response = s->gmac.hash_len;
+
+ /*
+ * The IV is always stored at the start of the buffer even
+ * though it may be duplicated in the payload. The crypto
+ * engine doesn't work properly if the IV offset points inside
+ * of the AAD region, so a second copy is always required.
+ *
+ * The IV for GCM is further complicated in that IPSec
+ * provides a full 16-byte IV (including the counter), whereas
+ * the /dev/crypto interface sometimes provides a full 16-byte
+ * IV (if no IV is provided in the ioctl) and sometimes a
+ * 12-byte IV (if the IV was explicit). For now the driver
+ * always assumes a 12-byte IV and initializes the low 4 byte
+ * counter to 1.
+ */
+ iv_loc = IV_IMMEDIATE;
+ if (crde->crd_flags & CRD_F_ENCRYPT) {
+ op_type = CHCR_ENCRYPT_OP;
+ if (crde->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
+ else
+ arc4rand(iv, s->blkcipher.iv_len, 0);
+ if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crde->crd_inject, s->blkcipher.iv_len, iv);
+ } else {
+ op_type = CHCR_DECRYPT_OP;
+ if (crde->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crde->crd_inject, s->blkcipher.iv_len, iv);
+ }
+
+ /*
+ * If the input IV is 12 bytes, append an explicit counter of
+ * 1.
+ */
+ if (s->blkcipher.iv_len == 12) {
+ *(uint32_t *)&iv[12] = htobe32(1);
+ iv_len = AES_BLOCK_LEN;
+ } else
+ iv_len = s->blkcipher.iv_len;
+
+ /*
+ * The output buffer consists of the cipher text followed by
+ * the tag when encrypting. For decryption it only contains
+ * the plain text.
+ */
+ if (op_type == CHCR_ENCRYPT_OP) {
+ if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE)
+ return (EFBIG);
+ } else {
+ if (crde->crd_len > MAX_REQUEST_SIZE)
+ return (EFBIG);
+ }
+ sglist_reset(qp->cq_sg_dst);
+ error = sglist_append_sglist(qp->cq_sg_dst, qp->cq_sg_crp, crde->crd_skip,
+ crde->crd_len);
+ if (error != 0)
+ return (error);
+ if (op_type == CHCR_ENCRYPT_OP) {
+ error = sglist_append_sglist(qp->cq_sg_dst, qp->cq_sg_crp,
+ crda->crd_inject, hash_size_in_response);
+ if (error != 0)
+ return (error);
+ }
+ dsgl_nsegs = ccp_count_sgl(qp->cq_sg_dst, DSGL_SGE_MAXLEN);
+ if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
+ return (EFBIG);
+ dsgl_len = ccp_phys_dsgl_len(dsgl_nsegs);
+
+ /*
+ * The 'key' part of the key context consists of the key followed
+ * by the Galois hash key.
+ */
+ kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
+
+ /*
+ * The input buffer consists of the IV, any AAD, and then the
+ * cipher/plain text. For decryption requests the hash is
+ * appended after the cipher text.
+ */
+ input_len = crda->crd_len + crde->crd_len;
+ if (op_type == CHCR_DECRYPT_OP)
+ input_len += hash_size_in_response;
+ if (input_len > MAX_REQUEST_SIZE)
+ return (EFBIG);
+ if (ccp_use_imm_data(transhdr_len, iv_len + input_len)) {
+ imm_len = input_len;
+ sgl_nsegs = 0;
+ sgl_len = 0;
+ } else {
+ imm_len = 0;
+ sglist_reset(qp->cq_sg_ulptx);
+ if (crda->crd_len != 0) {
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crda->crd_skip, crda->crd_len);
+ if (error != 0)
+ return (error);
+ }
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crde->crd_skip, crde->crd_len);
+ if (error != 0)
+ return (error);
+ if (op_type == CHCR_DECRYPT_OP) {
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crda->crd_inject, hash_size_in_response);
+ if (error != 0)
+ return (error);
+ }
+ sgl_nsegs = qp->cq_sg_ulptx->sg_nseg;
+ sgl_len = ccp_ulptx_sgl_len(sgl_nsegs);
+ }
+
+ if (crda->crd_len != 0) {
+ aad_start = iv_len + 1;
+ aad_stop = aad_start + crda->crd_len - 1;
+ } else {
+ aad_start = 0;
+ aad_stop = 0;
+ }
+ cipher_start = iv_len + crda->crd_len + 1;
+ if (op_type == CHCR_DECRYPT_OP)
+ cipher_stop = hash_size_in_response;
+ else
+ cipher_stop = 0;
+ if (op_type == CHCR_DECRYPT_OP)
+ auth_insert = hash_size_in_response;
+ else
+ auth_insert = 0;
+
+ wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
+ if (iv_loc == IV_IMMEDIATE)
+ wr_len += iv_len;
+ wr = alloc_wrqe(wr_len, sc->txq);
+ if (wr == NULL) {
+ sc->stats_wr_nomem++;
+ return (ENOMEM);
+ }
+ crwr = wrtod(wr);
+ memset(crwr, 0, wr_len);
+
+ ccp_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
+ 0, iv_loc, crp);
+
+ /* XXX: Hardcodes SGE loopback channel of 0. */
+ crwr->sec_cpl.op_ivinsrtofst = htobe32(
+ V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
+ V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
+ V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
+ V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
+ V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
+
+ crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
+
+ /*
+ * NB: cipherstop is explicitly set to 0. On encrypt it
+ * should normally be set to 0 anyway (as the encrypt crd ends
+ * at the end of the input). However, for decrypt the cipher
+ * ends before the tag in the AUTHENC case (and authstop is
+ * set to stop before the tag), but for GCM the cipher still
+ * runs to the end of the buffer. Not sure if this is
+ * intentional or a firmware quirk, but it is required for
+ * working tag validation with GCM decryption.
+ */
+ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
+ V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
+ V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
+ crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
+ V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ hmac_ctrl = ccp_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
+ crwr->sec_cpl.seqno_numivs = htobe32(
+ V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(op_type) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
+ V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
+ V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
+ V_SCMD_HMAC_CTRL(hmac_ctrl) |
+ V_SCMD_IV_SIZE(iv_len / 2) |
+ V_SCMD_NUM_IVS(0));
+ crwr->sec_cpl.ivgen_hdrlen = htobe32(
+ V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
+ V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
+
+ crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
+ memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
+ dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
+ memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
+
+ dst = (char *)(crwr + 1) + kctx_len;
+ ccp_write_phys_dsgl(sc, dst, dsgl_nsegs);
+ dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
+ if (iv_loc == IV_IMMEDIATE) {
+ memcpy(dst, iv, iv_len);
+ dst += iv_len;
+ }
+ if (imm_len != 0) {
+ if (crda->crd_len != 0) {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crda->crd_skip, crda->crd_len, dst);
+ dst += crda->crd_len;
+ }
+ crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
+ crde->crd_len, dst);
+ dst += crde->crd_len;
+ if (op_type == CHCR_DECRYPT_OP)
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crda->crd_inject, hash_size_in_response, dst);
+ } else
+ ccp_write_ulptx_sgl(sc, dst, sgl_nsegs);
+
+ /* XXX: TODO backpressure */
+ t4_wrq_tx(sc->adapter, wr);
+
+#endif
+ return (ENXIO);
+}
+
+#if 0
+static int
+ccp_gcm_done(struct ccp_softc *sc, struct ccp_session *s,
+ struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
+{
+
+ /*
+ * The updated IV to permit chained requests is at
+ * cpl->data[2], but OCF doesn't permit chained requests.
+ *
+ * Note that the hardware should always verify the GMAC hash.
+ */
+ return (error);
+}
+#endif
+
+#define MAX_TRNG_RETRIES 10
+u_int
+random_ccp_read(void *v, u_int c)
+{
+ uint32_t *buf;
+ u_int i, j;
+
+ KASSERT(c % sizeof(*buf) == 0, ("%u not a multiple of %zu", c, sizeof(*buf)));
+
+ buf = v;
+ for (i = c; i > 0; i -= sizeof(*buf)) {
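+ /*
+ * A zero read means the TRNG had no entropy ready; retry a
+ * bounded number of times before giving up on this harvest.
+ */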
+ for (j = 0; j < MAX_TRNG_RETRIES; j++) {
+ *buf = ccp_read_4(g_ccp_softc, TRNG_OUT_OFFSET);
+ if (*buf != 0)
+ break;
+ }
+ if (j == MAX_TRNG_RETRIES)
+ return (0);
+ buf++;
+ }
+ return (c);
+}
+
+#ifdef DDB
+void
+db_ccp_show_hw(struct ccp_softc *sc)
+{
+
+ db_printf(" queue mask: 0x%x\n",
+ ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET));
+ db_printf(" queue prio: 0x%x\n",
+ ccp_read_4(sc, CMD_QUEUE_PRIO_OFFSET));
+ db_printf(" reqid: 0x%x\n", ccp_read_4(sc, CMD_REQID_CONFIG_OFFSET));
+ db_printf(" trng output: 0x%x\n", ccp_read_4(sc, TRNG_OUT_OFFSET));
+ db_printf(" cmd timeout: 0x%x\n",
+ ccp_read_4(sc, CMD_CMD_TIMEOUT_OFFSET));
+ db_printf(" lsb public mask lo: 0x%x\n",
+ ccp_read_4(sc, LSB_PUBLIC_MASK_LO_OFFSET));
+ db_printf(" lsb public mask hi: 0x%x\n",
+ ccp_read_4(sc, LSB_PUBLIC_MASK_HI_OFFSET));
+ db_printf(" lsb private mask lo: 0x%x\n",
+ ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET));
+ db_printf(" lsb private mask hi: 0x%x\n",
+ ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET));
+ db_printf(" version: 0x%x\n", ccp_read_4(sc, VERSION_REG));
+}
+
+void
+db_ccp_show_queue_hw(struct ccp_queue *qp)
+{
+ struct ccp_softc *sc;
+ unsigned q;
+
+ sc = qp->cq_softc;
+ q = qp->cq_qindex;
+
+ db_printf(" qcontrol: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_CONTROL_BASE));
+ db_printf(" tail_lo: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_TAIL_LO_BASE));
+ db_printf(" head_lo: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE));
+ db_printf(" int enable: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_INT_ENABLE_BASE));
+ db_printf(" interrupt status: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_INTERRUPT_STATUS_BASE));
+ db_printf(" status: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE));
+ db_printf(" int stats: 0x%x\n",
+ ccp_read_queue_4(sc, q, CMD_Q_INT_STATUS_BASE));
+}
+#endif
Index: sys/crypto/ccp/ccp_lsb.h
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp_lsb.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#pragma once
+
+#define LSB_ENTRY_SIZE 32 /* bytes, or 256 bits */
+#define LSB_REGION_LENGTH 16 /* entries */
+
+/* For now, just statically allocate some LSB entries for specific purposes. */
+#define LSB_ENTRY_KEY 0
+#define LSB_ENTRY_IV 2
+#define LSB_ENTRY_SHA 4
+#define LSB_ENTRY_HMAC 6
+
+void ccp_queue_decode_lsb_regions(struct ccp_softc *sc, uint64_t lsbmask,
+ unsigned queue);
+void ccp_assign_lsb_regions(struct ccp_softc *sc, uint64_t lsbmask);
Index: sys/crypto/ccp/ccp_lsb.c
===================================================================
--- /dev/null
+++ sys/crypto/ccp/ccp_lsb.c
@@ -0,0 +1,115 @@
+/*-
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/module.h>
+#include <sys/random.h>
+#include <sys/rman.h>
+#include <sys/sglist.h>
+#include <sys/sysctl.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/random/randomdev.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+
+#include "cryptodev_if.h"
+
+#include "ccp.h"
+#include "ccp_lsb.h"
+
+void
+ccp_queue_decode_lsb_regions(struct ccp_softc *sc, uint64_t lsbmask,
+ unsigned queue)
+{
+ struct ccp_queue *qp;
+ unsigned i;
+
+ qp = &sc->queues[queue];
+
+ qp->lsb_mask = 0;
+
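+ /*
+ * The mask packs one bit per (queue, region) pair, MAX_HW_QUEUES bits
+ * per region; record which regions this queue may access.
+ */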
+ for (i = 0; i < MAX_LSB_REGIONS; i++) {
+ if (((1 << queue) & lsbmask) != 0)
+ qp->lsb_mask |= (1 << i);
+ lsbmask >>= MAX_HW_QUEUES;
+ }
+
+ /*
+ * Ignore region 0, which has special entries that cannot be used
+ * generally.
+ */
+ qp->lsb_mask &= ~(1 << 0);
+}
+
+/*
+ * Look for a private LSB for each queue. There are 7 general purpose LSBs
+ * total and 5 queues. PSP will reserve some of both. Firmware limits some
+ * queues' access to some LSBs; we hope it is fairly sane and just use a dumb
+ * greedy algorithm to assign LSBs to queues.
+ */
+void
+ccp_assign_lsb_regions(struct ccp_softc *sc, uint64_t lsbmask)
+{
+ unsigned q, i;
+
+ for (q = 0; q < nitems(sc->queues); q++) {
+ if (((1 << q) & sc->valid_queues) == 0)
+ continue;
+
+ sc->queues[q].private_lsb = -1;
+
+ /* Intentionally skip specialized 0th LSB */
+ for (i = 1; i < MAX_LSB_REGIONS; i++) {
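+ /*
+ * Claim the first region this queue may access and remove it
+ * from the pool so no other queue also claims it.
+ */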
+ if ((lsbmask &
+ (1ull << (q + (MAX_HW_QUEUES * i)))) != 0) {
+ sc->queues[q].private_lsb = i;
+ lsbmask &= ~(0x1Full << (MAX_HW_QUEUES * i));
+ break;
+ }
+ }
+
+ if (i == MAX_LSB_REGIONS) {
+ device_printf(sc->dev,
+ "Ignoring queue %u with no private LSB\n", q);
+ sc->valid_queues &= ~(1 << q);
+ }
+ }
+}
Index: sys/dev/random/random_harvestq.c
===================================================================
--- sys/dev/random/random_harvestq.c
+++ sys/dev/random/random_harvestq.c
@@ -304,6 +304,7 @@
[RANDOM_PURE_RNDTEST] = "PURE_RNDTEST",
[RANDOM_PURE_VIRTIO] = "PURE_VIRTIO",
[RANDOM_PURE_BROADCOM] = "PURE_BROADCOM",
+ [RANDOM_PURE_CCP] = "PURE_CCP",
/* "ENTROPYSOURCE" */
};
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -79,6 +79,7 @@
cas \
${_cbb} \
cc \
+ ${_ccp} \
cd9660 \
cd9660_iconv \
${_ce} \
@@ -577,6 +578,7 @@
.endif
_cardbus= cardbus
_cbb= cbb
+_ccp= ccp
_cpuctl= cpuctl
_cpufreq= cpufreq
_cs= cs
Index: sys/modules/ccp/Makefile
===================================================================
--- /dev/null
+++ sys/modules/ccp/Makefile
@@ -0,0 +1,19 @@
+# $FreeBSD$
+
+.PATH: ${SRCTOP}/sys/crypto/ccp
+
+KMOD= ccp
+
+SRCS= ccp.c ccp_hardware.c ccp_lsb.c
+SRCS+= ccp.h ccp_hardware.h ccp_lsb.h
+SRCS+= bus_if.h
+SRCS+= device_if.h
+SRCS+= cryptodev_if.h
+SRCS+= pci_if.h
+
+CFLAGS+= -fms-extensions -Wno-microsoft-anon-tag
+
+MFILES= kern/bus_if.m kern/device_if.m opencrypto/cryptodev_if.m \
+ dev/pci/pci_if.m
+
+.include <bsd.kmod.mk>
Index: sys/sys/random.h
===================================================================
--- sys/sys/random.h
+++ sys/sys/random.h
@@ -92,6 +92,7 @@
RANDOM_PURE_RNDTEST,
RANDOM_PURE_VIRTIO,
RANDOM_PURE_BROADCOM,
+ RANDOM_PURE_CCP,
ENTROPYSOURCE
};
Index: sys/x86/include/bus.h
===================================================================
--- sys/x86/include/bus.h
+++ sys/x86/include/bus.h
@@ -118,6 +118,7 @@
#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
#if defined(__amd64__) || defined(PAE)
+#define BUS_SPACE_MAXADDR_48BIT 0xFFFFFFFFFFFFULL
#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFULL
#else
#define BUS_SPACE_MAXADDR 0xFFFFFFFF
Index: tests/sys/opencrypto/cryptotest.py
===================================================================
--- tests/sys/opencrypto/cryptotest.py
+++ tests/sys/opencrypto/cryptotest.py
@@ -45,9 +45,9 @@
assert os.path.exists(os.path.join(katdir, base)), "Please 'pkg install nist-kat'"
return iglob(os.path.join(katdir, base, glob))
-aesmodules = [ 'cryptosoft0', 'aesni0', 'ccr0' ]
+aesmodules = [ 'cryptosoft0', 'aesni0', 'ccr0', 'ccp0' ]
desmodules = [ 'cryptosoft0', ]
-shamodules = [ 'cryptosoft0', 'aesni0', 'ccr0' ]
+shamodules = [ 'cryptosoft0', 'aesni0', 'ccr0', 'ccp0' ]
def GenTestCase(cname):
try:
@@ -108,10 +108,16 @@
# XXX - isn't supported
continue
- c = Crypto(cryptodev.CRYPTO_AES_NIST_GCM_16,
- cipherkey,
- mac=self._gmacsizes[len(cipherkey)],
- mackey=cipherkey, crid=crid)
+ try:
+ c = Crypto(cryptodev.CRYPTO_AES_NIST_GCM_16,
+ cipherkey,
+ mac=self._gmacsizes[len(cipherkey)],
+ mackey=cipherkey, crid=crid)
+ except EnvironmentError, e:
+ # Can't test ciphers the driver does not support.
+ if e.errno != errno.EOPNOTSUPP:
+ raise
+ continue
if mode == 'ENCRYPT':
rct, rtag = c.encrypt(pt, iv, aad)
@@ -189,7 +195,13 @@
if swapptct:
pt, ct = ct, pt
# run the fun
- c = Crypto(meth, cipherkey, crid=crid)
+ try:
+ c = Crypto(meth, cipherkey, crid=crid)
+ except EnvironmentError, e:
+ # Can't test ciphers the driver does not support.
+ if e.errno != errno.EOPNOTSUPP:
+ raise
+ continue
r = curfun(c, pt, iv)
self.assertEqual(r, ct)
@@ -309,6 +321,7 @@
cryptosoft = GenTestCase('cryptosoft0')
aesni = GenTestCase('aesni0')
ccr = GenTestCase('ccr0')
+ccp = GenTestCase('ccp0')
if __name__ == '__main__':
unittest.main()