Index: sys/conf/files.arm64 =================================================================== --- sys/conf/files.arm64 +++ sys/conf/files.arm64 @@ -215,6 +215,7 @@ dev/axgbe/xgbe-drv.c optional axgbe dev/axgbe/xgbe-mdio.c optional axgbe dev/pdc/pdc.c optional fdt pdc | spum +dev/spum/spum.c optional fdt spum dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/iicbus/sy8106a.c optional sy8106a fdt dev/iicbus/twsi/mv_twsi.c optional twsi fdt Index: sys/dev/spum/spum.h =================================================================== --- /dev/null +++ sys/dev/spum/spum.h @@ -0,0 +1,178 @@ +/*- + * Copyright (c) 2019 Juniper Networks, Inc. + * Copyright (c) 2019 Semihalf. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SPUM_H +#define SPUM_H + +#include + +#define SPUM_MAX_REQUEST_SIZE 64960 +#define SPUM_MAX_KEY_SIZE 64 +#define SPUM_MAX_HASH_SIZE 64 +#define SPUM_MAX_CRYPTO_SESSIONS 64 + +#define BIT(x) (1 << (x)) + +/* Header contests description */ +#define SPUM_SCTX_PRESENT BIT(31) +#define SPUM_BDESC_PRESENT BIT(29) +#define SPUM_MFM_PRESENT BIT(28) +#define SPUM_BD_PRESENT BIT(27) +#define SPUM_HASH_PRESENT BIT(26) +#define SPUM_SPUDT_PRESENT BIT(24) +#define SPUM_GENERIC_REQUEST BIT(16) + +/* SPDX cipher flags */ +#define SPUM_HASH_TYPE_SHIFT 8 +#define SPUM_HASH_TYPE_MASK 0x300 +#define SPUM_HASH_MODE_SHIFT 10 +#define SPUM_HASH_ALG_SHIFT 13 +#define SPUM_CIPHER_TYPE_SHIFT 16 +#define SPUM_CIPHER_TYPE_MASK 0x30000 +#define SPUM_CIPHER_MODE_SHIFT 18 +#define SPUM_CIPHER_ALG_SHIFT 21 +#define SPUM_ICV_IS_512 BIT(27) +#define SPUM_CIPHER_ORDER BIT(30) +#define SPUM_CIPHER_INBOUND BIT(31) + +/* SPDX extended_cipher_flags */ +#define SPUM_BD_SUPPRESS BIT(19) +#define SPUM_INSERT_ICV BIT(13) +#define SPUM_CHECK_ICV BIT(12) +#define SPUM_ICV_SIZE_SHIFT 8 +#define SPUM_SCTX_IV BIT(7) + +/* Hash alg identifiers */ +#define SPUM_HASH_ALG_MD5 1 +#define SPUM_HASH_ALG_SHA1 2 +#define SPUM_HASH_ALG_SHA224 3 +#define SPUM_HASH_ALG_SHA256 4 +#define SPUM_HASH_ALG_AES 5 +#define SPUM_HASH_ALG_SHA384 6 +#define SPUM_HASH_ALG_SHA512 7 + +/* Hash mode identifiers. 
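+ * Several of the identifiers below intentionally share a numeric value
+ * (e.g. the HASH/XCBC hash modes or the DES/3DES/INIT cipher types);
+ * the field is presumably decoded relative to the algorithm selected in
+ * the companion ALG field, and the driver never mixes the overlapping
+ * values.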
*/ +#define SPUM_HASH_MODE_HASH 0 +#define SPUM_HASH_MODE_XCBC 0 +#define SPUM_HASH_MODE_CMAC 1 +#define SPUM_HASH_MODE_HMAC 2 +#define SPUM_HASH_MODE_RABIN 4 +#define SPUM_HASH_MODE_CCM 5 +#define SPUM_HASH_MODE_GCM 6 + +/* Cipher alg identifiers */ +#define SPUM_CIPHER_ALG_RC4 1 +#define SPUM_CIPHER_ALG_DES 2 +#define SPUM_CIPHER_ALG_3DES 3 +#define SPUM_CIPHER_ALG_AES 4 + +/* Cipher mode identifiers */ +#define SPUM_CIPHER_MODE_ECB 0 +#define SPUM_CIPHER_MODE_CBC 1 +#define SPUM_CIPHER_MODE_OFB 2 +#define SPUM_CIPHER_MODE_CFB 3 +#define SPUM_CIPHER_MODE_CTR 4 +#define SPUM_CIPHER_MODE_CCM 5 +#define SPUM_CIPHER_MODE_GCM 6 +#define SPUM_CIPHER_MODE_XTS 7 + +/* Cipher type identifiers */ +#define SPUM_CIPHER_TYPE_DES 0 +#define SPUM_CIPHER_TYPE_3DES 0 +#define SPUM_CIPHER_TYPE_INIT 0 +#define SPUM_CIPHER_TYPE_UPDT 1 + +/* + * Key sizes for AES. + * They are the same for mac and enc keys. + */ +#define SPUM_KEY_TYPE_AES128 0 +#define SPUM_KEY_TYPE_AES192 1 +#define SPUM_KEY_TYPE_AES256 2 + +#define SPUM_STAT_ERROR BIT(17) +#define SPUM_STAT_ERROR_DETAIL 0xF00 +#define SPUM_STAT_ICV_MISMATCH BIT(8) + +#define SPUM_CCM_ICV_PRESENT BIT(6) +#define SPUM_CCM_ICV_SIZE_SHIFT 3 + +#define SPUM_ARC4_KEY_SIZE 260 + +struct spum_security_ctx { + uint32_t proto_and_size; /* Type and size of this structure. */ + uint32_t cipher_flags; /* Enc flags, see definitions above */ + uint32_t extended_cipher_flags; +#if 0 + uint8_t mac_key[1]; + uint8_t enc_key[1]; + uint8_t iv[1]; +#endif +} __packed; + +struct spum_buffer_desc { + uint16_t mac_offset; /* Offset to authentication data. */ + uint16_t mac_length; /* Length of authentication data. */ + uint16_t crypto_offset; /* Offset to encryption payload. */ + uint16_t crypto_length; /* Length of payload. */ + uint16_t icv_offset; /* Offset to ICV/hash */ + uint16_t iv_offset; /* Unused. We always pass IV in SCTX. */ +} __packed; + +struct spum_buffer_header { + uint16_t size; /* Total size of payload. */ + uint16_t prev_length; /* Unused. */ +} __packed; + +struct spum_crypto_session { + uint32_t enc_key_size; + uint32_t mac_key_size; + uint32_t hash_size; /* Hash size expected in response. */ + uint32_t hash_block_size; + uint32_t iv_size; + + uint8_t header_template[PDC_MAX_HEADER_SIZE]; + size_t template_size; +}; + +struct spum_sc { + device_t dev; + bool blocked; + struct mtx mtx; + int cid; +}; + +struct spum_req_ctx { + struct spum_sc *sc; + struct cryptop *crp; + struct cryptodesc *enc; + struct cryptodesc *mac; + struct spum_crypto_session *ses; + + uint8_t *aad_header_buf; +}; + +#endif /* SPUM_H */ Index: sys/dev/spum/spum.c =================================================================== --- /dev/null +++ sys/dev/spum/spum.c @@ -0,0 +1,1481 @@ +/*- + * Copyright (c) 2019 Juniper Networks, Inc. + * Copyright (c) 2019 Semihalf. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +#include "cryptodev_if.h" +#include "spum.h" + +//#define SPUM_DEBUG + +static int spum_is_supported_crypto(int alg); +static int spum_is_supported_hash(int alg); +static size_t spum_calc_pad_length(size_t aligment, size_t size); +static size_t spum_calc_hash_pad_length(size_t data_size, + size_t hash_block_size, int alg); + +static int spum_set_enc_key_size(struct spum_security_ctx* sctx, size_t key_size); +static int spum_set_mac_key_size(struct spum_security_ctx* sctx, size_t key_size); + +#ifdef SPUM_DEBUG +static void spum_dump_request(struct pdc_request* req); +#endif + +static void spum_cb(struct pdc_request* req); + +static void spum_process_mac_resp(struct pdc_request* req); +static void spum_process_arc4_resp(struct pdc_request* req); +static void spum_process_gcm_ccm_resp(struct pdc_request* req); + +static void spum_fill_iv(uint8_t* iv, size_t iv_len, struct spum_security_ctx* sctx, + struct cryptop* crp, struct cryptodesc* enc, struct cryptodesc* mac); +static void spum_set_rc4_key(uint8_t* dst, uint8_t* key, size_t key_size); +static int spum_set_mac_key(uint8_t* dst, uint8_t* key, size_t key_size, + size_t block_size); +static struct uio* spum_adjust_uio(struct uio* src, size_t offset); + +static int spum_create_request(device_t dev, + struct pdc_request* req, struct spum_crypto_session* ses, + struct cryptop* crp, struct cryptodesc* enc, struct cryptodesc* mac); + +static int spum_set_session_enc_params(device_t dev, struct cryptoini* enc, + struct spum_crypto_session* ses, struct spum_security_ctx* sctx); +static int spum_set_session_mac_params(device_t dev, + struct cryptoini* enc, struct cryptoini* mac, + struct spum_crypto_session* ses, struct spum_security_ctx* sctx); + +static int spum_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri); +static int spum_process(device_t dev, struct cryptop* crp, int hint); + +static int spum_attach(device_t dev); +static int spum_detach(device_t dev); +static int spum_probe(device_t dev); + +MALLOC_DECLARE(M_SPUM); +MALLOC_DEFINE(M_SPUM, "spum_memory", "memory used for metadata"); + +/* An obligotory header prepended to every request. 
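+ * Its contents are fixed: spum_dump_request() recognizes it with a
+ * byte-for-byte compare and skips it, and spum_newsession() copies it
+ * verbatim to the front of every header template.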
*/ +static unsigned char bcm_header[] = { + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 +}; + +static int +spum_is_supported_crypto(int alg) +{ + + switch (alg) { + case CRYPTO_DES_CBC: + case CRYPTO_3DES_CBC: + case CRYPTO_AES_CBC: + case CRYPTO_AES_ICM: + case CRYPTO_AES_NIST_GCM_16: + case CRYPTO_ARC4: + case CRYPTO_AES_CCM_16: + return (1); + default: + return (0); + } +} + +static int +spum_is_supported_hash(int alg) +{ + + switch (alg) { + case CRYPTO_AES_CCM_CBC_MAC: + case CRYPTO_AES_128_NIST_GMAC: + case CRYPTO_AES_192_NIST_GMAC: + case CRYPTO_AES_256_NIST_GMAC: + case CRYPTO_MD5_HMAC: + case CRYPTO_SHA1_HMAC: + case CRYPTO_SHA2_224_HMAC: + case CRYPTO_SHA2_256_HMAC: + case CRYPTO_SHA2_384_HMAC: + case CRYPTO_SHA2_512_HMAC: + case CRYPTO_MD5: + case CRYPTO_SHA1: + case CRYPTO_SHA2_224: + case CRYPTO_SHA2_256: + case CRYPTO_SHA2_384: + case CRYPTO_SHA2_512: + return (1); + default: + return (0); + } +} + +static int +spum_set_enc_key_size(struct spum_security_ctx* sctx, size_t key_size) +{ + + sctx->cipher_flags &= htobe32(~SPUM_CIPHER_TYPE_MASK); + switch (key_size) { + case 128: + sctx->cipher_flags |= htobe32( + SPUM_KEY_TYPE_AES128 << SPUM_CIPHER_TYPE_SHIFT); + break; + case 192: + sctx->cipher_flags |= htobe32( + SPUM_KEY_TYPE_AES192 << SPUM_CIPHER_TYPE_SHIFT); + break; + case 256: + sctx->cipher_flags |= + htobe32(SPUM_KEY_TYPE_AES256 << SPUM_CIPHER_TYPE_SHIFT); + break; + default: + return (EINVAL); + } + + return (0); +} + +static int +spum_set_mac_key_size(struct spum_security_ctx* sctx, size_t key_size) +{ + + sctx->cipher_flags &= htobe32(~SPUM_HASH_TYPE_MASK); + switch (key_size) { + case 128: + sctx->cipher_flags |= + htobe32(SPUM_KEY_TYPE_AES128 << SPUM_HASH_TYPE_SHIFT); + break; + case 192: + sctx->cipher_flags |= + htobe32(SPUM_KEY_TYPE_AES192 << SPUM_HASH_TYPE_SHIFT); + break; + case 256: + sctx->cipher_flags |= + htobe32(SPUM_KEY_TYPE_AES256 << SPUM_HASH_TYPE_SHIFT); + break; + default: + return (EINVAL); + } + + return (0); +} + + +static inline size_t +spum_calc_pad_length(size_t aligment, size_t size) +{ + + return ((size + (aligment - 1)) & ~(aligment - 1)) - size; +} + +static inline size_t +spum_calc_hash_pad_length(size_t data_size, size_t hash_block_size, int alg) +{ + size_t pad = 0; + + if (data_size % hash_block_size == 0) + return (0); + + /* Magic byte(0x80) + size */ + pad = 1; + switch(alg) { + case CRYPTO_SHA2_384_HMAC: + case CRYPTO_SHA2_384: + case CRYPTO_SHA2_512_HMAC: + case CRYPTO_SHA2_512: + pad += 2 * sizeof(uint64_t); + break; + default: + pad = sizeof(uint64_t); + } + pad += spum_calc_pad_length(hash_block_size, data_size + pad); + + return (pad); +} + +#ifdef SPUM_DEBUG +static void +spum_dump_request(struct pdc_request* req) +{ + uint8_t *buf = (uint8_t *) req->header; + struct spum_buffer_header *bd; + struct spum_buffer_desc *bdesc; + struct spum_security_ctx *sctx; + size_t sctx_size; + uint32_t header; + + /* Skip BCM header. */ + if (memcmp(buf, bcm_header, sizeof(bcm_header)) == 0) + buf += sizeof(bcm_header); + + header = be32toh(*(uint32_t *)buf); + /* Skip packet tag, since we don't use it. 
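+	 * (spum_newsession() always leaves this tag word zeroed.)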
*/ + buf += 2 * sizeof(uint32_t); + + if (header & SPUM_SCTX_PRESENT) { + sctx = (struct spum_security_ctx *)buf; + sctx_size = 4 * be32toh(sctx->proto_and_size); + buf += sctx_size; + + printf("Dumping SCTX:\n"); + printf( + "SCTX size : %zu\n" + "Cipher flags : 0x%08x\n" + "Extended cipher flags: 0x%08x\n", + sctx_size, + be32toh(sctx->cipher_flags), + be32toh(sctx->extended_cipher_flags)); + + printf("Raw dump of key(s) and IV:\n"); + hexdump((uint8_t *)sctx + sizeof(struct spum_security_ctx), + sctx_size - (3 * sizeof(uint32_t)), NULL, 0); + } + + if (header & SPUM_BDESC_PRESENT) { + bdesc = (struct spum_buffer_desc *)buf; + buf += sizeof(struct spum_buffer_desc); + + printf("Dumping BDESC:\n"); + printf( + "mac offset : %u\n" + "mac length : %u\n" + "crypto offset: %u\n" + "crypto length: %u\n" + "icv offset : %u\n" + "iv offset : %u\n", + be16toh(bdesc->mac_offset), + be16toh(bdesc->mac_length), + be16toh(bdesc->crypto_offset), + be16toh(bdesc->crypto_length), + be16toh(bdesc->icv_offset), + be16toh(bdesc->iv_offset)); + } + + if (header & SPUM_BD_PRESENT) { + bd = (struct spum_buffer_header *)buf; + + printf("Dumping BD:\n"); + printf( + "Size : %u\n" + "Prev Length: %u\n", + be16toh(bd->size), + be16toh(bd->prev_length)); + } + + printf("Data length: %zd\n", req->data_len); + printf("Raw footer dump:\n"); + hexdump(req->footer, MAX(req->footer_len, req->resp_footer_len), + NULL, 0); +} +#endif + +static void +spum_cb(struct pdc_request* req) +{ + struct spum_req_ctx *ctx = (struct spum_req_ctx *)req->arg; + uint32_t rc; + size_t aad_pad_len; + bool blocked; + +#ifdef SPUM_DEBUG + printf("%s: dumping req\n",__func__); + spum_dump_request(req); +#endif + + rc = *((uint32_t *)(req->footer + req->resp_footer_len - 4)); + rc = htobe32(rc); + + if (ctx->enc != NULL && + (ctx->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + ctx->enc->crd_alg == CRYPTO_AES_CCM_16)) { + aad_pad_len = spum_calc_pad_length(16, ctx->mac->crd_len); + } else { + aad_pad_len = 0; + } + + if (rc & SPUM_STAT_ERROR) { + if ((rc & SPUM_STAT_ERROR_DETAIL) == SPUM_STAT_ICV_MISMATCH) { + /* + * We don't really want to print an error here, + * as we could be flooded with incorrect ESP packets. 
+ */ + ctx->crp->crp_etype = EACCES; + } else { + device_printf(ctx->sc->dev, + "SPU request failed with: 0x%08x\n", + rc); + ctx->crp->crp_etype = EIO; + } + goto out; + } + + if (ctx->enc != NULL) { + switch (ctx->enc->crd_alg) { + case CRYPTO_AES_NIST_GCM_16: + case CRYPTO_AES_CCM_16: + spum_process_gcm_ccm_resp(req); + case CRYPTO_ARC4: + spum_process_arc4_resp(req); + break; + default: + break; + } + } + if (ctx->mac != NULL) + spum_process_mac_resp(req); + +out: + if (aad_pad_len != 0) { + if (ctx->crp->crp_flags & CRYPTO_F_IMBUF) { + free(ctx->aad_header_buf, M_SPUM); + } else if (ctx->crp->crp_flags & CRYPTO_F_IOV) { + free(req->data, M_IOV); + } + } + + crypto_done(ctx->crp); + mtx_lock(&ctx->sc->mtx); + blocked = ctx->sc->blocked; + ctx->sc->blocked = 0; + mtx_unlock(&ctx->sc->mtx); + if (blocked) + crypto_unblock(ctx->sc->cid, blocked); + + free(ctx, M_SPUM); +} + +static void +spum_process_mac_resp(struct pdc_request* req) +{ + struct spum_req_ctx *ctx = (struct spum_req_ctx *)req->arg; + size_t data_pad_len, icv_pad_len; + + if (ctx->enc == NULL) { + data_pad_len = spum_calc_hash_pad_length(ctx->mac->crd_len, + ctx->ses->hash_block_size, ctx->mac->crd_alg); + icv_pad_len = 0; + } else { + data_pad_len = 0; + icv_pad_len = spum_calc_pad_length(4, + ctx->mac->crd_len + ctx->enc->crd_len); + } + + /* For AES-GCM/CCM ICV has already been copied. */ + if (ctx->enc != NULL && + (ctx->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + ctx->enc->crd_alg == CRYPTO_AES_CCM_16)) + return; + + if (ctx->ses->hash_size != 0) + crypto_copyback(ctx->crp->crp_flags, + ctx->crp->crp_buf, ctx->mac->crd_inject, + ctx->ses->hash_size, + req->footer + data_pad_len + icv_pad_len); +} + +static void +spum_process_arc4_resp(struct pdc_request *req) +{ + struct spum_req_ctx *ctx = (struct spum_req_ctx *)req->arg; + + /* + * The only known consumer of this algorithm is openssl. + * It starts a session for each encrypted/decrypted file. + * The files is passed in 4KB chunks. + * Update the key basing on SUPDT field, assuming the above. + */ + memcpy(req->header + ctx->ses->template_size - ctx->ses->enc_key_size, + req->footer, + ctx->ses->enc_key_size); +} + +static void +spum_process_gcm_ccm_resp(struct pdc_request* req) +{ + struct spum_req_ctx *ctx = (struct spum_req_ctx *)req->arg; + size_t aad_pad_len, data_pad_len, icv_pad_len; + struct mbuf **mp; + + if (ctx->enc->crd_alg == CRYPTO_AES_NIST_GCM_16) { + data_pad_len = spum_calc_pad_length(16, ctx->enc->crd_len); + aad_pad_len = spum_calc_pad_length(16, ctx->mac->crd_len); + icv_pad_len = 0; + } else { + data_pad_len = spum_calc_pad_length(16, ctx->enc->crd_len); + aad_pad_len = spum_calc_pad_length(16, ctx->mac->crd_len + 2); + icv_pad_len = spum_calc_pad_length(4, + ctx->mac->crd_len + aad_pad_len + + ctx->enc->crd_len + data_pad_len); + } + + /* Restore mbuf to its original state. */ + if (aad_pad_len != 0 && ctx->crp->crp_flags & CRYPTO_F_IMBUF) { + mp = (struct mbuf **)&ctx->crp->crp_buf; + M_PREPEND(*mp, ctx->enc->crd_skip, M_WAITOK); + m_copyback(*mp, + 0, + ctx->enc->crd_skip, + ctx->aad_header_buf); + } + + /* + * Since ICV is verified by hardware + * copy it back only if we are encrypting. 
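+	 * On decryption a mismatch is instead reported by the hardware
+	 * through SPUM_STAT_ICV_MISMATCH and surfaces as EACCES in
+	 * spum_cb().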
+ */ + if ((ctx->enc->crd_flags & CRD_F_ENCRYPT) != 0) + crypto_copyback(ctx->crp->crp_flags, + ctx->crp->crp_buf, ctx->mac->crd_inject, + ctx->ses->hash_size, + req->footer + data_pad_len + icv_pad_len); +} + +static void +spum_fill_iv(uint8_t *iv, size_t iv_size, struct spum_security_ctx *sctx, + struct cryptop *crp, struct cryptodesc *enc, struct cryptodesc *mac) +{ + + /* + * AES-CCM requires some special treatment. + * Cryptodev sends something refered to as nonce instead of IV. + * We need to construct the proper IV as defined in specification: + * 1 byte flag + nonce + payload size + */ + if (enc->crd_alg == CRYPTO_AES_CCM_16) { + /* Length of the payload size field - 1. */ + iv[0] = 16 - AES_CCM_IV_LEN - 1 - 1; + /* Length of ICV. */ + iv[0] |= SPUM_CCM_ICV_PRESENT | + ((AES_CBC_MAC_HASH_LEN - 2) / 2 << SPUM_CCM_ICV_SIZE_SHIFT); + + /* + * Payload size. + * We have to fit it in 3 bytes, but since request length + * is at most 2^16-16 it's not a problem. + */ + iv[15] = enc->crd_len & 0xFF; + iv[14] = (enc->crd_len >> 8) & 0xFF; + iv[13] = 0; + + iv_size = AES_CCM_IV_LEN; + iv += 1; + } + + if (enc->crd_flags & CRD_F_ENCRYPT) { + if (enc->crd_flags & CRD_F_IV_EXPLICIT) + memcpy(iv, enc->crd_iv, iv_size); + else + arc4rand(iv, iv_size, 0); + + /* Copy it back to the request. */ + if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) + crypto_copyback(crp->crp_flags, crp->crp_buf, + enc->crd_inject, iv_size, iv); + + /* For AES-GCM write ICV as a part of response. */ + if (enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + enc->crd_alg == CRYPTO_AES_CCM_16) + sctx->extended_cipher_flags |= htobe32(SPUM_INSERT_ICV); + } else { + sctx->cipher_flags |= htobe32(SPUM_CIPHER_INBOUND); + + if (enc->crd_flags & CRD_F_IV_EXPLICIT) + memcpy(iv, enc->crd_iv, iv_size); + else + crypto_copydata(crp->crp_flags, crp->crp_buf, + enc->crd_inject, iv_size, iv); + + /* For AES-GCM/CCM verify ICV. */ + if (enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + enc->crd_alg == CRYPTO_AES_CCM_16) + sctx->extended_cipher_flags |= htobe32(SPUM_CHECK_ICV); + } +} + +static void +spum_set_rc4_key(uint8_t *dst, uint8_t *key, size_t key_size) +{ + size_t i; + + memset(dst, 0, 4); + dst += 4; + + for (i = 4; i < SPUM_ARC4_KEY_SIZE; i++) + dst[i] = key[(i - 4) % key_size]; +} + +static int +spum_set_mac_key(uint8_t* dst, uint8_t* key, size_t key_size, size_t block_size) +{ + + if (key_size == 0) + return (0); + + if (key_size <= block_size) { + memcpy(dst, key, key_size); + memset(dst + key_size, 0, block_size - key_size); + return (0); + } + + /* XXX: SPUM can't handle HMAC operations when key_size > block_size. 
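+	 * RFC 2104 handles an over-long key by first hashing it down to the
+	 * digest size, so a possible (untested) workaround would be to
+	 * pre-hash such keys in software before loading them into the SCTX.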
*/ + return (EINVAL); +} + +static struct uio* +spum_adjust_uio(struct uio *src, size_t offset) +{ + struct uio *uio; + uint8_t *data; + + uio = cloneuio(src); + if (uio == NULL) + return (NULL); + + while (offset > 0) { + if (uio->uio_iovcnt == 0) { + free(uio, M_IOV); + return (NULL); + } + + if (offset >= uio->uio_iov[0].iov_len) { + offset -= uio->uio_iov[0].iov_len; + uio->uio_resid -= uio->uio_iov[0].iov_len; + uio->uio_iovcnt--; + uio->uio_iov++; + } else { + data = (uint8_t *)uio->uio_iov[0].iov_base; + data += offset; + uio->uio_iov[0].iov_base = data; + uio->uio_iov[0].iov_len -= offset; + uio->uio_resid -= offset; + offset = 0; + } + } + + return (uio); +} + +static int +spum_create_request(device_t dev, struct pdc_request *req, + struct spum_crypto_session *ses, struct cryptop *crp, + struct cryptodesc *enc, struct cryptodesc *mac) +{ + struct spum_security_ctx *sctx; + struct spum_buffer_desc *bdesc; + struct spum_buffer_header *bd; + struct spum_req_ctx *ctx; + struct uio *uio; + struct mbuf *m; + uint8_t *data; + uint64_t *hash_len; + size_t iv_offset; + size_t data_pad_len, aad_pad_len, stat_pad_len, icv_pad_len; + int rc; + + memcpy(req->header, ses->header_template, ses->template_size); + iv_offset = ses->template_size; + + sctx = (struct spum_security_ctx*) (req->header + + sizeof(bcm_header) + + 2 * sizeof(uint32_t)); + bdesc = (struct spum_buffer_desc*) (req->header + + ses->template_size + + ses->iv_size); + bd = (struct spum_buffer_header*) ((uint8_t*) bdesc + + sizeof(struct spum_buffer_desc)); + + /* If a different key was specified for this request copy it. */ + if (enc != NULL && enc->crd_flags & CRD_F_KEY_EXPLICIT) { + if (enc->crd_alg == CRYPTO_ARC4) + return (EINVAL); + + /* We don't support changing the key size here. */ + if (enc->crd_klen != ses->enc_key_size / 8) { + device_printf(dev, + "Can't change key size during crypto session"); + return (EINVAL); + } + + memcpy(req->header + ses->template_size - ses->enc_key_size, + enc->crd_key, + ses->enc_key_size); + } + if (mac != NULL && (mac->crd_flags & CRD_F_KEY_EXPLICIT)) { + rc = spum_set_mac_key(req->header + ses->template_size - + ses->mac_key_size - ses->enc_key_size, + mac->crd_key, mac->crd_klen / 8, + ses->hash_block_size); + if (rc != 0) { + device_printf(sc->dev, + "Failed to set mac key\n"); + return (EINVAL); + } + } + + ctx = malloc(sizeof(struct spum_req_ctx), M_SPUM, M_NOWAIT); + if (ctx == NULL) + return (ENOMEM); + + ctx->crp = crp; + ctx->enc = enc; + ctx->mac = mac; + ctx->ses = ses; + ctx->sc = device_get_softc(dev); + ctx->aad_header_buf = NULL; + + req->flags = crp->crp_flags; + req->pdc_callback = spum_cb; + req->arg = ctx; + + if (enc != NULL) + spum_fill_iv(req->header + iv_offset, ses->iv_size, sctx, + crp, enc, mac); + + /* + * XXX: There are different padding requirements for different + * encryption algorithms. + */ + if (enc != NULL) { + switch (enc->crd_alg) { + case CRYPTO_AES_NIST_GCM_16: + aad_pad_len = spum_calc_pad_length(16, mac->crd_len); + data_pad_len = spum_calc_pad_length(16, enc->crd_len); + icv_pad_len = 0; + break; + case CRYPTO_AES_CCM_16: + /* For CCM, len + 2 needs to be 128-bit aligned. 
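+			 * The 2 extra bytes are the AAD length prefix that
+			 * CCM places in front of the associated data when
+			 * 0 < AAD length < 2^16 - 2^8, so the hardware
+			 * apparently pads the encoded form rather than the
+			 * raw AAD.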
*/ + if (mac->crd_len != 0) { + aad_pad_len = spum_calc_pad_length(16, + mac->crd_len + 2); + } else { + aad_pad_len = 0; + } + + data_pad_len = spum_calc_pad_length(16, enc->crd_len); + icv_pad_len = spum_calc_pad_length(4, + mac->crd_len + aad_pad_len + + enc->crd_len + data_pad_len); + break; + default: + data_pad_len = 0; + aad_pad_len = 0; + if (mac != NULL) { + icv_pad_len = spum_calc_pad_length(4, + mac->crd_len + enc->crd_len); + } else { + icv_pad_len = 0; + } + } + if (mac != NULL) { + stat_pad_len = spum_calc_pad_length(4, + mac->crd_len + aad_pad_len + + enc->crd_len + data_pad_len + + icv_pad_len + ses->hash_size); + } else { + stat_pad_len = spum_calc_pad_length(4, crp->crp_ilen); + } + } else { + data_pad_len = spum_calc_hash_pad_length(mac->crd_len, + ses->hash_block_size, mac->crd_alg); + aad_pad_len = 0; + stat_pad_len = 0; + icv_pad_len = 0; + } + + /* + * For AES-GCM/CCM the following fields are used for something different + * then their description suggests: iv_offset -> length of AAD + * mac_length -> length of data + length of AAD + padding + */ + if (mac != NULL) { + bdesc->mac_offset = (uint16_t) mac->crd_skip; + if (enc != NULL && + (enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + enc->crd_alg == CRYPTO_AES_CCM_16)) { + bdesc->iv_offset = (uint16_t) mac->crd_len; + bdesc->mac_length = + (uint16_t) mac->crd_len + + enc->crd_len + + aad_pad_len + + data_pad_len; + bdesc->icv_offset = + (uint16_t) mac->crd_inject + + aad_pad_len + + data_pad_len + + icv_pad_len; + } else { + bdesc->mac_length = (uint16_t)mac->crd_len; + bdesc->iv_offset = 0; + bdesc->icv_offset = 0; + } + } + + if (enc != NULL) { + bdesc->crypto_offset = (uint16_t) enc->crd_skip; + bdesc->crypto_length = (uint16_t) enc->crd_len; + } + + /* + * Total length of payload. Includes AAD bits, padding and data. + * Note that ICV and its padding are not included here. + */ + bd->size = (uint16_t) crp->crp_ilen + aad_pad_len + data_pad_len; + bd->prev_length = 0; + + req->header_len = + iv_offset + ses->iv_size + + sizeof(struct spum_buffer_desc) + + sizeof(struct spum_buffer_header); + + req->data = crp->crp_buf; + req->data_len = crp->crp_ilen; + + req->resp_header_len = 3 * sizeof(uint32_t); + + /* + * Footer contains the following: Data padding, ICV/HASH, STAT(1 word). + * RX footer can be bigger for hash requests. + */ + req->footer_len = + data_pad_len + icv_pad_len + + stat_pad_len + sizeof(uint32_t); + req->resp_footer_len = req->footer_len; + + if (mac != NULL) + req->resp_footer_len += ses->hash_size; + + /* Reserve space for SUPDT field */ + if (enc != NULL && enc->crd_alg == CRYPTO_ARC4) + req->resp_footer_len += SPUM_ARC4_KEY_SIZE; + + memset(req->footer, 0, MAX(req->footer_len, req->resp_footer_len)); + + /* + * Copy the ICV if we are decrypting, + * so that hardware can verify it. + */ + if (enc != NULL && + (enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + enc->crd_alg == CRYPTO_AES_CCM_16)) { + if ((enc->crd_flags & CRD_F_ENCRYPT) == 0) { + crypto_copydata(crp->crp_flags, crp->crp_buf, + mac->crd_inject, + ses->hash_size, + req->footer + data_pad_len + icv_pad_len); + } + req->footer_len += ses->hash_size; + + if (mac->crd_inject < crp->crp_ilen) { + req->data_len -= ses->hash_size; + } else { + bd->size += ses->hash_size; + } + } + + /* + * Normally the padding should be all zeroes. + * The only exception are pure hashing requests, + * where we inject a magic byte(0x80) together with the actual size. 
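+	 * This mirrors the Merkle-Damgard padding these hash functions use
+	 * for their final block: a single 0x80 byte, zeroes, then the
+	 * message length, stored little-endian for MD5 and big-endian for
+	 * the SHA family, hence the byte-order switch below.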
+ */ + if (data_pad_len != 0 && enc == NULL) { + *(uint8_t*)req->footer = 0x80; + hash_len = (uint64_t *)(req->footer + + data_pad_len - sizeof(uint64_t)); + + if (mac->crd_alg == CRYPTO_MD5_HMAC || + mac->crd_alg == CRYPTO_MD5) { + *hash_len = htole64(mac->crd_len); + } else { + *hash_len = htobe64(mac->crd_len); + } + } + + /* + * WORKAROUND: HW requires AAD to be padded to 16 bytes. + * Since AAD is passed to us in the same buffer as data, we copy it to + * request header and strip all the bytes before data from buffer. + * They will be later copied back in callback. + */ + if (enc != NULL && + (enc->crd_alg == CRYPTO_AES_NIST_GCM_16 || + enc->crd_alg == CRYPTO_AES_CCM_16) && + aad_pad_len != 0) { + req->data_len -= enc->crd_skip; + req->resp_header_len += mac->crd_len + aad_pad_len; + + bdesc->mac_offset = 0; + bdesc->crypto_offset = mac->crd_len + aad_pad_len; + bdesc->icv_offset = bdesc->mac_length + icv_pad_len; + + bd->size = bdesc->mac_length + ses->hash_size; + + crypto_copydata(crp->crp_flags, crp->crp_buf, + mac->crd_skip, mac->crd_len, &req->header[req->header_len]); + req->header_len += mac->crd_len; + + memset(&(req->header[req->header_len]), 0, aad_pad_len); + req->header_len += aad_pad_len; + + if (crp->crp_flags & CRYPTO_F_IMBUF) { + m = (struct mbuf *)crp->crp_buf; + ctx->aad_header_buf = malloc(enc->crd_skip, + M_SPUM, M_NOWAIT); + if (ctx->aad_header_buf == NULL) { + rc = ENOMEM; + goto out; + } + m_copydata(m, 0, enc->crd_skip, ctx->aad_header_buf); + m_adj(m, enc->crd_skip); + } else if (crp->crp_flags & CRYPTO_F_IOV) { + uio = (struct uio*)crp->crp_buf; + req->data = spum_adjust_uio(uio, enc->crd_skip); + if (req->data == NULL) { + rc = ENOMEM; + goto out; + } + } else { + data = (uint8_t *)crp->crp_buf; + req->data = data + enc->crd_skip; + } + } + + bdesc->mac_offset = htobe16(bdesc->mac_offset); + bdesc->mac_length = htobe16(bdesc->mac_length); + bdesc->iv_offset = htobe16(bdesc->iv_offset); + bdesc->icv_offset = htobe16(bdesc->icv_offset); + bdesc->crypto_offset = htobe16(bdesc->crypto_offset); + bdesc->crypto_length = htobe16(bdesc->crypto_length); + bd->size = htobe16(bd->size); + +#ifdef SPUM_DEBUG + printf("%s: dumping req\n",__func__); + spum_dump_request(req); +#endif + + if (req->header_len > PDC_MAX_HEADER_SIZE || + req->resp_header_len > PDC_MAX_HEADER_SIZE || + req->footer_len > PDC_MAX_HEADER_SIZE || + req->resp_footer_len > PDC_MAX_HEADER_SIZE) { + rc = EINVAL; + goto out1; + } + + return (0); + +out1: + if (aad_pad_len != 0) { + if (crp->crp_flags & CRYPTO_F_IMBUF) { + free(ctx->aad_header_buf, M_SPUM); + } else if (ctx->crp->crp_flags & CRYPTO_F_IOV) { + free(req->data, M_IOV); + } + } +out: + free(ctx, M_SPUM); + return (rc); +} + +static int +spum_set_session_enc_params(device_t dev, struct cryptoini *enc, + struct spum_crypto_session *ses, struct spum_security_ctx *sctx) +{ + int rc; + int alg, mode; + + alg = mode = 0; + + /* We are always providing IV as a part of SCTX. 
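+	 * The hardware can also take a per-request IV through the buffer
+	 * descriptor's iv_offset field, but that path is left unused by
+	 * this driver (see the spum_buffer_desc comment in spum.h).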
*/ + sctx->extended_cipher_flags = htobe32(SPUM_SCTX_IV); + sctx->cipher_flags = 0; + ses->enc_key_size = enc->cri_klen / 8; + + switch (enc->cri_alg) { + case CRYPTO_ARC4: + ses->iv_size = 0; + sctx->extended_cipher_flags = 0; + alg = SPUM_CIPHER_ALG_RC4; + mode = SPUM_CIPHER_MODE_ECB; + break; + case CRYPTO_AES_CBC: + ses->iv_size = AES_BLOCK_LEN; + alg = SPUM_CIPHER_ALG_AES; + mode = SPUM_CIPHER_MODE_CBC; + rc = spum_set_enc_key_size(sctx, enc->cri_klen); + if (rc != 0) { + device_printf(dev, + "Incorrect AES key size: %d\n", + enc->cri_klen); + return (rc); + } + break; + case CRYPTO_AES_ICM: + ses->iv_size = AES_BLOCK_LEN; + alg = SPUM_CIPHER_ALG_AES; + mode = SPUM_CIPHER_MODE_CTR; + rc = spum_set_enc_key_size(sctx, enc->cri_klen); + if (rc != 0) { + device_printf(dev, + "Incorrect AES key size: %d\n", + enc->cri_klen); + return (rc); + } + break; + case CRYPTO_AES_CCM_16: + /* 12 byte random nonce + 4 bytes of metadata */ + ses->iv_size = AES_CCM_IV_LEN + 4; + alg = SPUM_CIPHER_ALG_AES; + mode = SPUM_CIPHER_MODE_CCM; + rc = spum_set_enc_key_size(sctx, enc->cri_klen); + if (rc != 0) { + device_printf(dev, + "Incorrect AES key size: %d\n", + enc->cri_klen); + return (rc); + } + break; + case CRYPTO_AES_NIST_GCM_16: + ses->iv_size = AES_GCM_IV_LEN; + alg = SPUM_CIPHER_ALG_AES; + mode = SPUM_CIPHER_MODE_GCM; + rc = spum_set_enc_key_size(sctx, enc->cri_klen); + if (rc != 0) { + device_printf(dev, + "Incorrect AES key size: %d\n", + enc->cri_klen); + return (rc); + } + break; + case CRYPTO_DES_CBC: + ses->iv_size = DES_BLOCK_LEN; + alg = SPUM_CIPHER_ALG_DES; + mode = SPUM_CIPHER_MODE_CBC; + sctx->cipher_flags |= + htobe32(SPUM_CIPHER_TYPE_DES << SPUM_CIPHER_TYPE_SHIFT); + break; + case CRYPTO_3DES_CBC: + ses->iv_size = DES3_BLOCK_LEN; + alg = SPUM_CIPHER_ALG_3DES; + mode = SPUM_CIPHER_MODE_CBC; + sctx->cipher_flags = + htobe32(SPUM_CIPHER_TYPE_3DES << SPUM_CIPHER_TYPE_SHIFT); + break; + default: + device_printf(dev, + "Unrecognized encryption algorithm: %d\n", + enc->cri_alg); + return (EINVAL); + } + + sctx->cipher_flags |= htobe32( + (alg << SPUM_CIPHER_ALG_SHIFT) | + (mode << SPUM_CIPHER_MODE_SHIFT)); + + return (0); +} + +static int +spum_set_session_mac_params(device_t dev, + struct cryptoini *enc, struct cryptoini *mac, + struct spum_crypto_session *ses, struct spum_security_ctx *sctx) +{ + int rc; + int alg, mode, len; + + ses->mac_key_size = mac->cri_klen / 8; + alg = 0; + mode = 0; + len = 0; + + switch (mac->cri_alg) { + case CRYPTO_AES_128_NIST_GMAC: + case CRYPTO_AES_192_NIST_GMAC: + case CRYPTO_AES_256_NIST_GMAC: + alg = SPUM_HASH_ALG_AES; + mode = SPUM_HASH_MODE_GCM; + len = AES_GMAC_HASH_LEN; + break; + case CRYPTO_AES_CCM_CBC_MAC: + alg = SPUM_HASH_ALG_AES; + mode = SPUM_HASH_MODE_CCM; + len = AES_CBC_MAC_HASH_LEN; + break; + case CRYPTO_MD5_HMAC: + alg = SPUM_HASH_ALG_MD5; + mode = SPUM_HASH_MODE_HMAC; + len = MD5_HASH_LEN; + break; + case CRYPTO_SHA1_HMAC: + alg = SPUM_HASH_ALG_SHA1; + mode = SPUM_HASH_MODE_HMAC; + len = SHA1_HASH_LEN; + break; + case CRYPTO_SHA2_224_HMAC: + alg = SPUM_HASH_ALG_SHA224; + mode = SPUM_HASH_MODE_HMAC; + len = SHA2_224_HASH_LEN; + break; + case CRYPTO_SHA2_256_HMAC: + alg = SPUM_HASH_ALG_SHA256; + mode = SPUM_HASH_MODE_HMAC; + len = SHA2_256_HASH_LEN; + break; + case CRYPTO_SHA2_384_HMAC: + alg = SPUM_HASH_ALG_SHA384; + mode = SPUM_HASH_MODE_HMAC; + len = SHA2_384_HASH_LEN; + break; + case CRYPTO_SHA2_512_HMAC: + alg = SPUM_HASH_ALG_SHA512; + mode = SPUM_HASH_MODE_HMAC; + len = SHA2_512_HASH_LEN; + break; + case CRYPTO_MD5: + alg = 
SPUM_HASH_ALG_MD5; + mode = SPUM_HASH_MODE_HASH; + len = MD5_HASH_LEN; + ses->mac_key_size = 0; + break; + case CRYPTO_SHA1: + alg = SPUM_HASH_ALG_SHA1; + mode = SPUM_HASH_MODE_HASH; + len = SHA1_HASH_LEN; + ses->mac_key_size = 0; + break; + case CRYPTO_SHA2_224: + alg = SPUM_HASH_ALG_SHA224; + mode = SPUM_HASH_MODE_HASH; + len = SHA2_224_HASH_LEN; + ses->mac_key_size = 0; + break; + case CRYPTO_SHA2_256: + alg = SPUM_HASH_ALG_SHA256; + mode = SPUM_HASH_MODE_HASH; + len = SHA2_256_HASH_LEN; + ses->mac_key_size = 0; + break; + case CRYPTO_SHA2_384: + alg = SPUM_HASH_ALG_SHA384; + mode = SPUM_HASH_MODE_HASH; + len = SHA2_384_HASH_LEN; + ses->mac_key_size = 0; + break; + case CRYPTO_SHA2_512: + alg = SPUM_HASH_ALG_SHA512; + mode = SPUM_HASH_MODE_HASH; + len = SHA2_512_HASH_LEN; + ses->mac_key_size = 0; + break; + default: + device_printf(dev, + "Unrecognized mac algorithm: %d\n", + mac->cri_alg); + return (EINVAL); + } + + /* + * For GCM/CCM we are forced to use the same key for auth and enc. + * It is only sent once. + * The mac key size has to be set in sctx for AES-GCM/CCM. + */ + if ((enc != NULL && enc->cri_alg == CRYPTO_AES_NIST_GCM_16) || + (enc != NULL && enc->cri_alg == CRYPTO_AES_CCM_16)) { + ses->mac_key_size = 0; + rc = spum_set_mac_key_size(sctx, enc->cri_klen); + if (rc != 0) { + device_printf(dev, + "Incorrect MAC key size: %d\n", + mac->cri_klen); + return (rc); + } + } + ses->hash_block_size = len; + + if (len != 0 && mac->cri_mlen != 0) + len = mac->cri_mlen; + + /* Set algorithm, mode and auth key size */ + sctx->cipher_flags |= htobe32( + (alg << SPUM_HASH_ALG_SHIFT) | + (mode << SPUM_HASH_MODE_SHIFT)); + + if (len == 64) { + sctx->cipher_flags |= htobe32(SPUM_ICV_IS_512); + } else { + sctx->extended_cipher_flags |= + htobe32((len / 4) << SPUM_ICV_SIZE_SHIFT); + } + ses->hash_size = len; + return (0); +} + +static int +spum_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) +{ + struct spum_crypto_session *ses; + struct cryptoini *enc = NULL; + struct cryptoini *mac = NULL; + uint8_t *buf; + uint32_t *msg_header; + struct spum_security_ctx *sctx; + int rc = 0; + + while (cri != NULL) { + if (mac != NULL && enc != NULL) + return (EINVAL); + + if (spum_is_supported_crypto(cri->cri_alg)) { + if (enc != NULL) + return (EINVAL); + + enc = cri; + cri = cri->cri_next; + continue; + } + if (spum_is_supported_hash(cri->cri_alg)) { + if (mac != NULL) + return (EINVAL); + + mac = cri; + cri = cri->cri_next; + continue; + } + } + if (enc == NULL && mac == NULL) + return (EINVAL); + + if ((enc != NULL && enc->cri_klen / 8 > SPUM_MAX_KEY_SIZE) || + (mac != NULL && mac->cri_klen / 8 > SPUM_MAX_KEY_SIZE)) { + device_printf(dev, "Unsupported key size\n"); + return (E2BIG); + } + + ses = crypto_get_driver_session(cses); + + /* + * Prepare the header template for requests. + * Since most of its contests won't change + * we will just copy it while processing requests. + */ + buf = ses->header_template; + + memcpy(buf, bcm_header, sizeof(bcm_header)); + buf += sizeof(bcm_header); + + msg_header = (uint32_t*) buf; + buf += sizeof(uint32_t); + + /* + * First word of header. + * It lists the components included. + */ + *msg_header = htobe32(SPUM_SCTX_PRESENT | + SPUM_BDESC_PRESENT | + SPUM_BD_PRESENT | + SPUM_GENERIC_REQUEST); + /* + * The next word can be used to tag the request. + * Since we don't need to do that just skip it. 
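+	 *
+	 * The finished template is laid out as:
+	 *
+	 *   bcm_header | header word | tag word (zero) | SCTX flag words
+	 *     | HMAC key (padded to the hash block size, if any)
+	 *     | cipher key (if any)
+	 *
+	 * spum_create_request() copies it verbatim and appends the
+	 * per-request IV, BDESC and BD structures in front of the payload.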
+ */ + memset(buf, 0, sizeof(uint32_t)); + buf += sizeof(uint32_t); + + sctx = (struct spum_security_ctx *) buf; + buf += sizeof(struct spum_security_ctx); + + memset(sctx, 0, sizeof(struct spum_security_ctx)); + + ses->enc_key_size = 0; + ses->mac_key_size = 0; + ses->iv_size = 0; + ses->hash_size = 0; + + if (enc != NULL) { + rc = spum_set_session_enc_params(dev, enc, ses, sctx); + if (rc != 0) + goto fail; + } + + if (mac != NULL) { + rc = spum_set_session_mac_params(dev, enc, mac, ses, sctx); + if (rc != 0) + goto fail; + } + + if (ses->mac_key_size != 0) { + rc = spum_set_mac_key(buf, mac->cri_key, session->mac_key_size, + session->hash_block_size); + if (rc != 0) { + device_printf(dev, + "Failed to set mac key\n"); + goto fail; + } + session->mac_key_size = session->hash_block_size; + buf += session->mac_key_size; + } + + if (ses->enc_key_size != 0) { + if (enc->cri_alg == CRYPTO_ARC4) { + spum_set_rc4_key(buf, enc->cri_key, ses->enc_key_size); + ses->enc_key_size = SPUM_ARC4_KEY_SIZE; + } else { + memcpy(buf, enc->cri_key, ses->enc_key_size); + buf += ses->enc_key_size; + } + } + + /* SCTX size is supposed to be given in words for some reason. */ + sctx->proto_and_size = htobe32( + (sizeof(struct spum_security_ctx) + + ses->enc_key_size + + ses->mac_key_size + + ses->iv_size) + / 4); + + /* + * This is not the entire size of header, + * but only the part that is filled here. + */ + ses->template_size = + sizeof(bcm_header) + + 2 * sizeof(uint32_t) + + sizeof(struct spum_security_ctx) + + ses->mac_key_size + + ses->enc_key_size; + + return (0); +fail: + return (rc); + +} + +static int +spum_process(device_t dev, struct cryptop *crp, int hint) +{ + struct spum_sc *sc; + struct spum_crypto_session *session; + struct pdc_request *req; + struct cryptodesc *crd = crp->crp_desc; + struct cryptodesc *enc = NULL; + struct cryptodesc *mac = NULL; + int rc = 0; + + sc = device_get_softc(dev); + + while (crd != NULL) { + if (mac != NULL && enc != NULL) + return (EINVAL); + + if (spum_is_supported_crypto(crd->crd_alg)) { + if (enc != NULL) + return (EINVAL); + + enc = crd; + crd = crd->crd_next; + continue; + } + if (spum_is_supported_hash(crd->crd_alg)) { + if (mac != NULL) + return (EINVAL); + + mac = crd; + + crd = crd->crd_next; + continue; + } + } + + if (enc == NULL && mac == NULL) + return (EINVAL); + + if (crp->crp_ilen > SPUM_MAX_REQUEST_SIZE) { + crp->crp_etype = EFBIG; + goto fail; + } + + /* We require the AAD bits to be present before data. */ + if (mac != NULL && enc != NULL && + mac->crd_len + mac->crd_skip > enc->crd_len + enc->crd_skip) { + device_printf(dev, "AAD bits have to be included before data\n"); + crp->crp_etype = EINVAL; + goto fail; + } + + /* Device doesn't support requests with payload size == 0 */ + if (enc != NULL && enc->crd_len == 0) { + device_printf(dev, + "Device doesn't support requests with payload size = 0\n"); + crp->crp_etype = EINVAL; + goto fail; + } + + if (enc != NULL && enc->crd_alg == CRYPTO_AES_CCM_16) { + /* For AES-CCM AAD size must be equal to 8, 12, or 16. 
*/ + if (mac->crd_len != 8 && + mac->crd_len != 12 && + mac->crd_len != 16) { + device_printf(dev, + "Unsupported AAD size: %d\n" + "Supported values are 8, 12, 16\n", + mac->crd_len); + crp->crp_etype = EINVAL; + goto fail; + } + } + + session = crypto_get_driver_session(crp->crp_session); + + req = pdc_allocate(); + if (req == NULL) { + mtx_lock(&sc->mtx); + sc->blocked = true; + mtx_unlock(&sc->mtx); + return (ERESTART); + } + + rc = spum_create_request(dev, req, session, crp, enc, mac); + if (rc != 0) { + crp->crp_etype = rc; + crypto_done(crp); + pdc_free(req); + return (0); + } + + rc = pdc_process_request(req); + if (rc != 0) { + crp->crp_etype = rc; + crypto_done(crp); + pdc_free(req); + return (0); + } + + return (0); +fail: + crypto_done(crp); + return (0); +} + +static int +spum_probe(device_t dev) +{ + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_is_compatible(dev, "brcm,spum-crypto")) + return (ENXIO); + + device_set_desc(dev, "Broadcom Secure Processing Unit"); + + return (BUS_PROBE_DEFAULT); +} + +static int +spum_attach(device_t dev) +{ + struct spum_sc *sc = device_get_softc(dev); + + sc->cid = crypto_get_driverid(dev, sizeof(struct spum_crypto_session), + CRYPTOCAP_F_HARDWARE); + if (sc->cid < 0) { + device_printf(dev, "Failed to obtain crypto driver id\n"); + return (ENXIO); + } + + sc->blocked = false; + + mtx_init(&sc->mtx, device_get_nameunit(dev), "SPUM sctx lock", MTX_DEF); + + crypto_register(sc->cid, CRYPTO_ARC4, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0); + crypto_register(sc->cid, CRYPTO_DES_CBC, 0, 0); + crypto_register(sc->cid, CRYPTO_3DES_CBC, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_CCM_16, 0, 0); + + crypto_register(sc->cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_MD5_HMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_224_HMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0); + + crypto_register(sc->cid, CRYPTO_MD5, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA1, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_224, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_256, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_384, 0, 0); + crypto_register(sc->cid, CRYPTO_SHA2_512, 0, 0); + + return (0); +} + +static int +spum_detach(device_t dev) +{ + struct spum_sc *sc; + + sc = device_get_softc(dev); + + mtx_destroy(&sc->mtx); + + return (0); +} + +static device_method_t spum_methods[] = { + DEVMETHOD(device_probe, spum_probe), + DEVMETHOD(device_attach, spum_attach), + DEVMETHOD(device_detach, spum_detach), + + DEVMETHOD(cryptodev_newsession, spum_newsession), + DEVMETHOD(cryptodev_process, spum_process), + + DEVMETHOD_END +}; + +static driver_t spum_driver = { + .name = "spum", + .methods = spum_methods, + .size = sizeof(struct spum_sc), +}; + +static devclass_t spum_devclass; + +DRIVER_MODULE(spum, simplebus, spum_driver, spum_devclass, 0, 0); +MODULE_DEPEND(spum, crypto, 1, 1, 1); +MODULE_DEPEND(spum, pdc, 1, 1, 1); Index: sys/modules/Makefile =================================================================== --- sys/modules/Makefile +++ 
sys/modules/Makefile @@ -342,6 +342,7 @@ spi \ ${_splash} \ ${_sppp} \ + ${_spum} \ ste \ stge \ ${_superio} \ @@ -564,6 +565,7 @@ _armv8crypto= armv8crypto _efirt= efirt _em= em +_spum= spum _rockchip= rockchip .endif Index: sys/modules/spum/Makefile =================================================================== --- /dev/null +++ sys/modules/spum/Makefile @@ -0,0 +1,9 @@ +# $FreeBSD$ + +.PATH: ${SRCTOP}/sys/dev/spum + +KMOD= spum + +SRCS= spum.c + +.include <bsd.kmod.mk>
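
For reference, a sketch of how the new code is enabled (names as declared in the sys/conf/files.arm64 hunk above; untested): an arm64 FDT kernel picks the driver up with a line along the lines of

	device	spum

where the "pdc | spum" clause above pulls in the PDC backend automatically, or it can be built and loaded as spum.ko via the new sys/modules/spum Makefile on a kernel that already provides the pdc driver it depends on.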