diff --git a/sys/opencrypto/cryptodev.c b/sys/opencrypto/cryptodev.c index 9312945a9b8b..7f52b57fe5e0 100644 --- a/sys/opencrypto/cryptodev.c +++ b/sys/opencrypto/cryptodev.c @@ -1,1436 +1,1432 @@ /* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ /*- * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * Copyright (c) 2014-2021 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Portions of this software were developed by Ararat River * Consulting, LLC under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(opencrypto); SDT_PROBE_DEFINE1(opencrypto, dev, ioctl, error, "int"/*line number*/); #ifdef COMPAT_FREEBSD12 /* * Previously, most ioctls were performed against a cloned descriptor * of /dev/crypto obtained via CRIOGET. Now all ioctls are performed * against /dev/crypto directly. 
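 *
 * For reference, a minimal userspace consumer of the new interface
 * (a hypothetical sketch; names like key_ptr/in/out/iv_ptr are
 * placeholders and error handling is omitted) now looks roughly like:
 *
 *	int fd = open("/dev/crypto", O_RDWR);
 *	struct session_op sop = { .cipher = CRYPTO_AES_CBC,
 *	    .keylen = 16, .key = key_ptr };
 *	ioctl(fd, CIOCGSESSION, &sop);
 *	struct crypt_op cop = { .ses = sop.ses, .op = COP_ENCRYPT,
 *	    .len = len, .src = in, .dst = out, .iv = iv_ptr };
 *	ioctl(fd, CIOCCRYPT, &cop);
 *
 * with no CRIOGET step to clone a descriptor first.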
*/ #define CRIOGET _IOWR('c', 100, uint32_t) #endif /* the following are done against the cloned descriptor */ #ifdef COMPAT_FREEBSD32 #include #include struct session_op32 { uint32_t cipher; uint32_t mac; uint32_t keylen; uint32_t key; int mackeylen; uint32_t mackey; uint32_t ses; }; struct session2_op32 { uint32_t cipher; uint32_t mac; uint32_t keylen; uint32_t key; int mackeylen; uint32_t mackey; uint32_t ses; int crid; int ivlen; int maclen; int pad[2]; }; struct crypt_op32 { uint32_t ses; uint16_t op; uint16_t flags; u_int len; uint32_t src, dst; uint32_t mac; uint32_t iv; }; struct crypt_aead32 { uint32_t ses; uint16_t op; uint16_t flags; u_int len; u_int aadlen; u_int ivlen; uint32_t src; uint32_t dst; uint32_t aad; uint32_t tag; uint32_t iv; }; #define CIOCGSESSION32 _IOWR('c', 101, struct session_op32) #define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32) #define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32) #define CIOCCRYPTAEAD32 _IOWR('c', 109, struct crypt_aead32) static void session_op_from_32(const struct session_op32 *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTRIN_CP(*from, *to, key); CP(*from, *to, mackeylen); PTRIN_CP(*from, *to, mackey); CP(*from, *to, ses); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_from_32(const struct session2_op32 *from, struct session2_op *to) { session_op_from_32((const struct session_op32 *)from, to); CP(*from, *to, crid); CP(*from, *to, ivlen); CP(*from, *to, maclen); } static void session_op_to_32(const struct session2_op *from, struct session_op32 *to) { CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTROUT_CP(*from, *to, key); CP(*from, *to, mackeylen); PTROUT_CP(*from, *to, mackey); CP(*from, *to, ses); } static void session2_op_to_32(const struct session2_op *from, struct session2_op32 *to) { session_op_to_32(from, (struct session_op32 *)to); CP(*from, *to, crid); } static void crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, mac); PTRIN_CP(*from, *to, iv); } static void crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, mac); PTROUT_CP(*from, *to, iv); } static void crypt_aead_from_32(const struct crypt_aead32 *from, struct crypt_aead *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, aad); PTRIN_CP(*from, *to, tag); PTRIN_CP(*from, *to, iv); } static void crypt_aead_to_32(const struct crypt_aead *from, struct crypt_aead32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, aad); PTROUT_CP(*from, *to, tag); PTROUT_CP(*from, *to, iv); } #endif static void session2_op_from_op(const struct session_op *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); memcpy(to, from, sizeof(*from)); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_to_op(const struct session2_op *from, struct session_op *to) { memcpy(to, from, 
sizeof(*to)); } struct csession { TAILQ_ENTRY(csession) next; crypto_session_t cses; volatile u_int refs; uint32_t ses; struct mtx lock; /* for op submission */ const struct enc_xform *txform; int hashsize; int ivsize; void *key; void *mackey; }; struct cryptop_data { struct csession *cse; char *buf; char *obuf; char *aad; bool done; }; struct fcrypt { TAILQ_HEAD(csessionlist, csession) csessions; int sesn; struct mtx lock; }; static bool use_outputbuffers; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_use_output, CTLFLAG_RW, &use_outputbuffers, 0, "Use separate output buffers for /dev/crypto requests."); static bool use_separate_aad; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_separate_aad, CTLFLAG_RW, &use_separate_aad, 0, "Use separate AAD buffer for /dev/crypto requests."); /* * Check a crypto identifier to see if it requested * a software device/driver. This can be done either * by device name/class or through search constraints. */ static int checkforsoftware(int *cridp) { int crid; crid = *cridp; if (!crypto_devallowsoft) { if (crid & CRYPTOCAP_F_SOFTWARE) { if (crid & CRYPTOCAP_F_HARDWARE) { *cridp = CRYPTOCAP_F_HARDWARE; return 0; } return EINVAL; } if ((crid & CRYPTOCAP_F_HARDWARE) == 0 && (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0) return EINVAL; } return 0; } static int cse_create(struct fcrypt *fcr, struct session2_op *sop) { struct crypto_session_params csp; struct csession *cse; const struct enc_xform *txform; const struct auth_hash *thash; void *key = NULL; void *mackey = NULL; crypto_session_t cses; int crid, error; switch (sop->cipher) { case 0: txform = NULL; break; case CRYPTO_AES_CBC: txform = &enc_xform_rijndael128; break; case CRYPTO_AES_XTS: txform = &enc_xform_aes_xts; break; case CRYPTO_NULL_CBC: txform = &enc_xform_null; break; case CRYPTO_CAMELLIA_CBC: txform = &enc_xform_camellia; break; case CRYPTO_AES_ICM: txform = &enc_xform_aes_icm; break; case CRYPTO_AES_NIST_GCM_16: txform = &enc_xform_aes_nist_gcm; break; case CRYPTO_CHACHA20: txform = &enc_xform_chacha20; break; case CRYPTO_AES_CCM_16: txform = &enc_xform_ccm; break; case CRYPTO_CHACHA20_POLY1305: txform = &enc_xform_chacha20_poly1305; break; default: CRYPTDEB("invalid cipher"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } switch (sop->mac) { case 0: thash = NULL; break; case CRYPTO_POLY1305: thash = &auth_hash_poly1305; break; case CRYPTO_SHA1_HMAC: thash = &auth_hash_hmac_sha1; break; case CRYPTO_SHA2_224_HMAC: thash = &auth_hash_hmac_sha2_224; break; case CRYPTO_SHA2_256_HMAC: thash = &auth_hash_hmac_sha2_256; break; case CRYPTO_SHA2_384_HMAC: thash = &auth_hash_hmac_sha2_384; break; case CRYPTO_SHA2_512_HMAC: thash = &auth_hash_hmac_sha2_512; break; case CRYPTO_RIPEMD160_HMAC: thash = &auth_hash_hmac_ripemd_160; break; #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: /* Should always be paired with GCM. 
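 * FreeBSD 12 requested AES-GCM as the cipher CRYPTO_AES_NIST_GCM_16
 * combined with a CRYPTO_AES_*_NIST_GMAC mac of matching key length;
 * the current interface expresses the same session with the cipher
 * alone.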
*/ if (sop->cipher != CRYPTO_AES_NIST_GCM_16) { CRYPTDEB("GMAC without GCM"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case CRYPTO_AES_NIST_GMAC: switch (sop->mackeylen * 8) { case 128: thash = &auth_hash_nist_gmac_aes_128; break; case 192: thash = &auth_hash_nist_gmac_aes_192; break; case 256: thash = &auth_hash_nist_gmac_aes_256; break; default: CRYPTDEB("invalid GMAC key length"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_AES_CCM_CBC_MAC: switch (sop->mackeylen) { case 16: thash = &auth_hash_ccm_cbc_mac_128; break; case 24: thash = &auth_hash_ccm_cbc_mac_192; break; case 32: thash = &auth_hash_ccm_cbc_mac_256; break; default: CRYPTDEB("Invalid CBC MAC key size %d", sop->keylen); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_SHA1: thash = &auth_hash_sha1; break; case CRYPTO_SHA2_224: thash = &auth_hash_sha2_224; break; case CRYPTO_SHA2_256: thash = &auth_hash_sha2_256; break; case CRYPTO_SHA2_384: thash = &auth_hash_sha2_384; break; case CRYPTO_SHA2_512: thash = &auth_hash_sha2_512; break; case CRYPTO_NULL_HMAC: thash = &auth_hash_null; break; case CRYPTO_BLAKE2B: thash = &auth_hash_blake2b; break; case CRYPTO_BLAKE2S: thash = &auth_hash_blake2s; break; default: CRYPTDEB("invalid mac"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } if (txform == NULL && thash == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } memset(&csp, 0, sizeof(csp)); if (use_outputbuffers) csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; if (sop->cipher == CRYPTO_AES_NIST_GCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (sop->cipher == CRYPTO_AES_CCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_CCM_CBC_MAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } thash = NULL; break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (sop->cipher == CRYPTO_CHACHA20_POLY1305) { if (sop->mac != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (txform != NULL && thash != NULL) csp.csp_mode = CSP_MODE_ETA; else if (txform != NULL) csp.csp_mode = CSP_MODE_CIPHER; else csp.csp_mode = CSP_MODE_DIGEST; switch (csp.csp_mode) { case CSP_MODE_AEAD: case CSP_MODE_ETA: if (use_separate_aad) csp.csp_flags |= CSP_F_SEPARATE_AAD; break; } if (txform != NULL) { csp.csp_cipher_alg = txform->type; csp.csp_cipher_klen = sop->keylen; if (sop->keylen > txform->maxkey || sop->keylen < txform->minkey) { CRYPTDEB("invalid cipher parameters"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } key = malloc(csp.csp_cipher_klen, M_XDATA, M_WAITOK); error = copyin(sop->key, key, csp.csp_cipher_klen); if (error) { CRYPTDEB("invalid key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_cipher_key = key; csp.csp_ivlen = txform->ivsize; } if (thash != NULL) { csp.csp_auth_alg = thash->type; csp.csp_auth_klen = sop->mackeylen; if 
(sop->mackeylen > thash->keysize || sop->mackeylen < 0) { CRYPTDEB("invalid mac key length"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (csp.csp_auth_klen != 0) { mackey = malloc(csp.csp_auth_klen, M_XDATA, M_WAITOK); error = copyin(sop->mackey, mackey, csp.csp_auth_klen); if (error) { CRYPTDEB("invalid mac key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_auth_key = mackey; } if (csp.csp_auth_alg == CRYPTO_AES_NIST_GMAC) csp.csp_ivlen = AES_GCM_IV_LEN; if (csp.csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC) csp.csp_ivlen = AES_CCM_IV_LEN; } if (sop->ivlen != 0) { if (csp.csp_ivlen == 0) { CRYPTDEB("does not support an IV"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_ivlen = sop->ivlen; } if (sop->maclen != 0) { if (!(thash != NULL || csp.csp_mode == CSP_MODE_AEAD)) { CRYPTDEB("does not support a MAC"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_auth_mlen = sop->maclen; } crid = sop->crid; error = checkforsoftware(&crid); if (error) { CRYPTDEB("checkforsoftware"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } error = crypto_newsession(&cses, &csp, crid); if (error) { CRYPTDEB("crypto_newsession"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } cse = malloc(sizeof(struct csession), M_XDATA, M_WAITOK | M_ZERO); mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF); refcount_init(&cse->refs, 1); cse->key = key; cse->mackey = mackey; cse->cses = cses; cse->txform = txform; if (sop->maclen != 0) cse->hashsize = sop->maclen; else if (thash != NULL) cse->hashsize = thash->hashsize; - else if (csp.csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) - cse->hashsize = AES_GMAC_HASH_LEN; - else if (csp.csp_cipher_alg == CRYPTO_AES_CCM_16) - cse->hashsize = AES_CBC_MAC_HASH_LEN; - else if (csp.csp_cipher_alg == CRYPTO_CHACHA20_POLY1305) - cse->hashsize = POLY1305_HASH_LEN; + else if (csp.csp_mode == CSP_MODE_AEAD) + cse->hashsize = txform->macsize; cse->ivsize = csp.csp_ivlen; mtx_lock(&fcr->lock); TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); cse->ses = fcr->sesn++; mtx_unlock(&fcr->lock); sop->ses = cse->ses; /* return hardware/driver id */ sop->crid = crypto_ses2hid(cse->cses); bail: if (error) { free(key, M_XDATA); free(mackey, M_XDATA); } return (error); } static struct csession * cse_find(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { refcount_acquire(&cse->refs); mtx_unlock(&fcr->lock); return (cse); } } mtx_unlock(&fcr->lock); return (NULL); } static void cse_free(struct csession *cse) { if (!refcount_release(&cse->refs)) return; crypto_freesession(cse->cses); mtx_destroy(&cse->lock); if (cse->key) free(cse->key, M_XDATA); if (cse->mackey) free(cse->mackey, M_XDATA); free(cse, M_XDATA); } static bool cse_delete(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { TAILQ_REMOVE(&fcr->csessions, cse, next); mtx_unlock(&fcr->lock); cse_free(cse); return (true); } } mtx_unlock(&fcr->lock); return (false); } static struct cryptop_data * cod_alloc(struct csession *cse, size_t aad_len, size_t len) { struct cryptop_data *cod; cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO); cod->cse = cse; if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_AAD) { if (aad_len != 0) cod->aad = 
malloc(aad_len, M_XDATA, M_WAITOK); cod->buf = malloc(len, M_XDATA, M_WAITOK); } else cod->buf = malloc(aad_len + len, M_XDATA, M_WAITOK); if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_OUTPUT) cod->obuf = malloc(len, M_XDATA, M_WAITOK); return (cod); } static void cod_free(struct cryptop_data *cod) { free(cod->aad, M_XDATA); free(cod->obuf, M_XDATA); free(cod->buf, M_XDATA); free(cod, M_XDATA); } static int cryptodev_cb(struct cryptop *crp) { struct cryptop_data *cod = crp->crp_opaque; /* * Lock to ensure the wakeup() is not missed by the loops * waiting on cod->done in cryptodev_op() and * cryptodev_aead(). */ mtx_lock(&cod->cse->lock); cod->done = true; mtx_unlock(&cod->cse->lock); wakeup(cod); return (0); } static int cryptodev_op(struct csession *cse, const struct crypt_op *cop) { const struct crypto_session_params *csp; struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; char *dst; int error; if (cop->len > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform) { if ((cop->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } if (cop->mac && cse->hashsize == 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } /* * The COP_F_CIPHER_FIRST flag predates explicit session * modes, but the only way it was used was for EtA so allow it * as long as it is consistent with EtA. */ if (cop->flags & COP_F_CIPHER_FIRST) { if (cop->op != COP_ENCRYPT) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } cod = cod_alloc(cse, 0, cop->len + cse->hashsize); dst = cop->dst; crp = crypto_getreq(cse->cses, M_WAITOK); error = copyin(cop->src, cod->buf, cop->len); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_payload_start = 0; crp->crp_payload_length = cop->len; if (cse->hashsize) crp->crp_digest_start = cop->len; csp = crypto_get_params(cse->cses); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_COMPRESS; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECOMPRESS; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_CIPHER: if (cop->len == 0 || (cop->iv == NULL && cop->len == cse->ivsize)) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_DIGEST: switch (cop->op) { case 0: case COP_ENCRYPT: case COP_DECRYPT: crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; if (cod->obuf != NULL) crp->crp_digest_start = 0; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_AEAD: if (cse->ivsize != 0 && cop->iv == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } /* FALLTHROUGH */ case CSP_MODE_ETA: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } crp->crp_flags = CRYPTO_F_CBIMM | 
	    (cop->flags & COP_F_BATCH);
	crypto_use_buf(crp, cod->buf, cop->len + cse->hashsize);
	if (cod->obuf)
		crypto_use_output_buf(crp, cod->obuf,
		    cop->len + cse->hashsize);
	crp->crp_callback = cryptodev_cb;
	crp->crp_opaque = cod;

	if (cop->iv) {
		if (cse->ivsize == 0) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			error = EINVAL;
			goto bail;
		}
		error = copyin(cop->iv, crp->crp_iv, cse->ivsize);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
		crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
	} else if (cse->ivsize != 0) {
		if (crp->crp_payload_length < cse->ivsize) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			error = EINVAL;
			goto bail;
		}
		crp->crp_iv_start = 0;
		crp->crp_payload_length -= cse->ivsize;
		if (crp->crp_payload_length != 0)
			crp->crp_payload_start = cse->ivsize;
		dst += cse->ivsize;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		error = copyin(cop->mac, cod->buf + crp->crp_digest_start,
		    cse->hashsize);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
	}
again:
	/*
	 * Let the dispatch run unlocked, then, interlock against the
	 * callback before checking if the operation completed and going
	 * to sleep.  This ensures drivers don't inherit our lock which
	 * results in a lock order reversal between crypto_dispatch forced
	 * entry and the crypto_done callback into us.
	 */
	error = crypto_dispatch(crp);
	if (error != 0) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		goto bail;
	}
	mtx_lock(&cse->lock);
	while (!cod->done)
		mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0);
	mtx_unlock(&cse->lock);

	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		cod->done = false;
		goto again;
	}

	if (crp->crp_etype != 0) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		error = crp->crp_etype;
		goto bail;
	}

	if (cop->dst != NULL) {
		error = copyout(cod->obuf != NULL ? cod->obuf :
		    cod->buf + crp->crp_payload_start, dst,
		    crp->crp_payload_length);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
	}

	if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) {
		error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) +
		    crp->crp_digest_start, cop->mac, cse->hashsize);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
	}

bail:
	crypto_freereq(crp);
	cod_free(cod);

	return (error);
}

static int
cryptodev_aead(struct csession *cse, struct crypt_aead *caead)
{
	const struct crypto_session_params *csp;
	struct cryptop_data *cod = NULL;
	struct cryptop *crp = NULL;
	char *dst;
	int error;

	if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		return (E2BIG);
	}

	if (cse->txform == NULL || cse->hashsize == 0 || caead->tag == NULL ||
	    (caead->len % cse->txform->blocksize) != 0) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		return (EINVAL);
	}

	/*
	 * The COP_F_CIPHER_FIRST flag predates explicit session
	 * modes, but the only way it was used was for EtA so allow it
	 * as long as it is consistent with EtA.
	 */
	if (caead->flags & COP_F_CIPHER_FIRST) {
		if (caead->op != COP_ENCRYPT) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			return (EINVAL);
		}
	}

	cod = cod_alloc(cse, caead->aadlen, caead->len + cse->hashsize);
	dst = caead->dst;

	crp = crypto_getreq(cse->cses, M_WAITOK);

	if (cod->aad != NULL)
		error = copyin(caead->aad, cod->aad, caead->aadlen);
	else
		error = copyin(caead->aad, cod->buf, caead->aadlen);
	if (error) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		goto bail;
	}
	crp->crp_aad = cod->aad;
	crp->crp_aad_start = 0;
	crp->crp_aad_length = caead->aadlen;

	if (cod->aad != NULL)
		crp->crp_payload_start = 0;
	else
		crp->crp_payload_start = caead->aadlen;
	error = copyin(caead->src, cod->buf + crp->crp_payload_start,
	    caead->len);
	if (error) {
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		goto bail;
	}
	crp->crp_payload_length = caead->len;

	if (caead->op == COP_ENCRYPT && cod->obuf != NULL)
		crp->crp_digest_start = crp->crp_payload_output_start +
		    caead->len;
	else
		crp->crp_digest_start = crp->crp_payload_start + caead->len;

	csp = crypto_get_params(cse->cses);
	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		switch (caead->op) {
		case COP_ENCRYPT:
			crp->crp_op = CRYPTO_OP_ENCRYPT |
			    CRYPTO_OP_COMPUTE_DIGEST;
			break;
		case COP_DECRYPT:
			crp->crp_op = CRYPTO_OP_DECRYPT |
			    CRYPTO_OP_VERIFY_DIGEST;
			break;
		default:
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			error = EINVAL;
			goto bail;
		}
		break;
	default:
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		error = EINVAL;
		goto bail;
	}

	crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH);
	crypto_use_buf(crp, cod->buf,
	    crp->crp_payload_start + caead->len + cse->hashsize);
	if (cod->obuf != NULL)
		crypto_use_output_buf(crp, cod->obuf,
		    caead->len + cse->hashsize);
	crp->crp_callback = cryptodev_cb;
	crp->crp_opaque = cod;

	if (caead->iv) {
		/*
		 * Permit a 16-byte IV for AES-XTS, but only use the
		 * first 8 bytes as a block number.
		 */
		if (csp->csp_mode == CSP_MODE_ETA &&
		    csp->csp_cipher_alg == CRYPTO_AES_XTS &&
		    caead->ivlen == AES_BLOCK_LEN)
			caead->ivlen = AES_XTS_IV_LEN;

		if (cse->ivsize == 0) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			error = EINVAL;
			goto bail;
		}

		if (caead->ivlen != cse->ivsize) {
			error = EINVAL;
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}

		error = copyin(caead->iv, crp->crp_iv, cse->ivsize);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
		crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
	} else {
		error = EINVAL;
		SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
		goto bail;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		error = copyin(caead->tag, cod->buf + crp->crp_digest_start,
		    cse->hashsize);
		if (error) {
			SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
			goto bail;
		}
	}
again:
	/*
	 * Let the dispatch run unlocked, then, interlock against the
	 * callback before checking if the operation completed and going
	 * to sleep.  This ensures drivers don't inherit our lock which
	 * results in a lock order reversal between crypto_dispatch forced
	 * entry and the crypto_done callback into us.
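	 * In particular, cryptodev_cb() takes cse->lock only long enough
	 * to set cod->done and call wakeup(), so the mtx_sleep() loop
	 * below cannot miss the completion.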
*/ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { error = crp->crp_etype; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (caead->dst != NULL) { error = copyout(cod->obuf != NULL ? cod->obuf : cod->buf + crp->crp_payload_start, dst, crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + crp->crp_digest_start, caead->tag, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } bail: crypto_freereq(crp); cod_free(cod); return (error); } static int cryptodev_find(struct crypt_find_op *find) { device_t dev; size_t fnlen = sizeof find->name; if (find->crid != -1) { dev = crypto_find_device_byhid(find->crid); if (dev == NULL) return (ENOENT); strncpy(find->name, device_get_nameunit(dev), fnlen); find->name[fnlen - 1] = '\x0'; } else { find->name[fnlen - 1] = '\x0'; find->crid = crypto_find_driver(find->name); if (find->crid == -1) return (ENOENT); } return (0); } static void fcrypt_dtor(void *data) { struct fcrypt *fcr = data; struct csession *cse; while ((cse = TAILQ_FIRST(&fcr->csessions))) { TAILQ_REMOVE(&fcr->csessions, cse, next); KASSERT(refcount_load(&cse->refs) == 1, ("%s: crypto session %p with %d refs", __func__, cse, refcount_load(&cse->refs))); cse_free(cse); } mtx_destroy(&fcr->lock); free(fcr, M_XDATA); } static int crypto_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct fcrypt *fcr; int error; fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK | M_ZERO); TAILQ_INIT(&fcr->csessions); mtx_init(&fcr->lock, "fcrypt", NULL, MTX_DEF); error = devfs_set_cdevpriv(fcr, fcrypt_dtor); if (error) fcrypt_dtor(fcr); return (error); } static int crypto_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct fcrypt *fcr; struct csession *cse; struct session2_op *sop; struct crypt_op *cop; struct crypt_aead *caead; uint32_t ses; int error = 0; union { struct session2_op sopc; #ifdef COMPAT_FREEBSD32 struct crypt_op copc; struct crypt_aead aeadc; #endif } thunk; #ifdef COMPAT_FREEBSD32 u_long cmd32; void *data32; cmd32 = 0; data32 = NULL; switch (cmd) { case CIOCGSESSION32: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION; data = (void *)&thunk.sopc; session_op_from_32((struct session_op32 *)data32, &thunk.sopc); break; case CIOCGSESSION232: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION2; data = (void *)&thunk.sopc; session2_op_from_32((struct session2_op32 *)data32, &thunk.sopc); break; case CIOCCRYPT32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPT; data = (void *)&thunk.copc; crypt_op_from_32((struct crypt_op32 *)data32, &thunk.copc); break; case CIOCCRYPTAEAD32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPTAEAD; data = (void *)&thunk.aeadc; crypt_aead_from_32((struct crypt_aead32 *)data32, &thunk.aeadc); break; } #endif devfs_get_cdevpriv((void **)&fcr); switch (cmd) { #ifdef COMPAT_FREEBSD12 case CRIOGET: /* * NB: This may fail in cases that the old * implementation did not if the current process has * restricted filesystem access (e.g. 
running in a * jail that does not expose /dev/crypto or in * capability mode). */ error = kern_openat(td, AT_FDCWD, "/dev/crypto", UIO_SYSSPACE, O_RDWR, 0); if (error == 0) *(uint32_t *)data = td->td_retval[0]; break; #endif case CIOCGSESSION: case CIOCGSESSION2: if (cmd == CIOCGSESSION) { session2_op_from_op((void *)data, &thunk.sopc); sop = &thunk.sopc; } else sop = (struct session2_op *)data; error = cse_create(fcr, sop); if (cmd == CIOCGSESSION && error == 0) session2_op_to_op(sop, (void *)data); break; case CIOCFSESSION: ses = *(uint32_t *)data; if (!cse_delete(fcr, ses)) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CIOCCRYPT: cop = (struct crypt_op *)data; cse = cse_find(fcr, cop->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_op(cse, cop); cse_free(cse); break; case CIOCFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CIOCCRYPTAEAD: caead = (struct crypt_aead *)data; cse = cse_find(fcr, caead->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_aead(cse, caead); cse_free(cse); break; default: error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); break; } #ifdef COMPAT_FREEBSD32 switch (cmd32) { case CIOCGSESSION32: if (error == 0) session_op_to_32((void *)data, data32); break; case CIOCGSESSION232: if (error == 0) session2_op_to_32((void *)data, data32); break; case CIOCCRYPT32: if (error == 0) crypt_op_to_32((void *)data, data32); break; case CIOCCRYPTAEAD32: if (error == 0) crypt_aead_to_32((void *)data, data32); break; } #endif return (error); } static struct cdevsw crypto_cdevsw = { .d_version = D_VERSION, .d_open = crypto_open, .d_ioctl = crypto_ioctl, .d_name = "crypto", }; static struct cdev *crypto_dev; /* * Initialization code, both for static and dynamic loading. */ static int cryptodev_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: if (bootverbose) printf("crypto: \n"); crypto_dev = make_dev(&crypto_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "crypto"); return 0; case MOD_UNLOAD: /*XXX disallow if active sessions */ destroy_dev(crypto_dev); return 0; } return EINVAL; } static moduledata_t cryptodev_mod = { "cryptodev", cryptodev_modevent, 0 }; MODULE_VERSION(cryptodev, 1); DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_DEPEND(cryptodev, crypto, 1, 1, 1); MODULE_DEPEND(cryptodev, zlib, 1, 1, 1); diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c index 84caf9d8c676..fe320047f5f6 100644 --- a/sys/opencrypto/cryptosoft.c +++ b/sys/opencrypto/cryptosoft.c @@ -1,1762 +1,1645 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014-2021 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Portions of this software were developed by Ararat River * Consulting, LLC under sponsorship of the FreeBSD Foundation. 
* * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; const struct auth_hash *sw_axf; uint16_t sw_mlen; }; struct swcr_encdec { void *sw_kschedule; const struct enc_xform *sw_exf; }; struct swcr_compdec { const struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. */ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; const struct enc_xform *exf; struct swcr_encdec *sw; size_t inlen, outlen; int i, blks, resid; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *inblk; unsigned char *outblk; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; csp = crypto_get_params(crp->crp_session); if (exf->native_blocksize == 0) { /* Check for non-padded data */ if ((crp->crp_payload_length % exf->blocksize) != 0) return (EINVAL); blks = exf->blocksize; } else blks = exf->native_blocksize; if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); if (crp->crp_cipher_key != NULL) { error = exf->setkey(sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } crypto_read_iv(crp, iv); if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv, csp->csp_ivlen); } ivp = iv; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inblk = crypto_cursor_segment(&cc_in, &inlen); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outblk = crypto_cursor_segment(&cc_out, &outlen); resid = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); /* * Loop through encrypting blocks. 'inlen' is the remaining * length of the current segment in the input buffer. * 'outlen' is the remaining length of current segment in the * output buffer. */ while (resid >= blks) { /* * If the current block is not contained within the * current input/output segment, use 'blk' as a local * buffer. 
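		 * When the output segment is also too short, the result
		 * lands in 'blk' and is copied back into the output
		 * cursor once the block has been processed.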
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

static void
swcr_authprepare(const struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{
	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__,
		    axf->type);
	}
}

/*
 * Compute or verify hash.
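 * CRYPTO_OP_VERIFY_DIGEST requests recompute the digest and compare it
 * against the sw_mlen bytes at crp_digest_start with timingsafe_bcmp();
 * compute requests write the digest there instead.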
*/ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; const struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; csp = crypto_get_params(crp->crp_session); if (crp->crp_auth_key != NULL) { swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); if (crp->crp_aad != NULL) err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (err) goto out; if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) err = crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, axf->Update, &ctx); else err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (err) goto out; if (csp->csp_flags & CSP_F_ESN) axf->Update(&ctx, crp->crp_esn, 4); axf->Final(aalg, &ctx); if (sw->sw_octx != NULL) { bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char uaalg[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) err = EBADMSG; explicit_bzero(uaalg, sizeof(uaalg)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } explicit_bzero(aalg, sizeof(aalg)); out: explicit_bzero(&ctx, sizeof(ctx)); return (err); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc; const u_char *inblk; union authctx ctx; struct swcr_auth *swa; const struct auth_hash *axf; uint32_t *blkp; size_t len; int blksz, error, ivlen, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); crypto_cursor_init(&cc, &crp->crp_buf); crypto_cursor_advance(&cc, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) { inblk = crypto_cursor_segment(&cc, &len); if (len >= blksz) { len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc, len); } else { len = blksz; crypto_cursor_copydata(&cc, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc, resid, blk); axf->Update(&ctx, blk, blksz); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); error = 0; if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag2)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(blkbuf, 
sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; - union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; - const struct auth_hash *axf; const struct enc_xform *exf; uint32_t *blkp; size_t len; int blksz, error, ivlen, r, resid; swa = &ses->swcr_auth; - axf = swa->sw_axf; - - bcopy(swa->sw_ictx, &ctx, axf->ctxsize); - blksz = GMAC_BLOCK_LEN; - KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", - __func__)); - swe = &ses->swcr_encdec; exf = swe->sw_exf; - KASSERT(axf->blocksize == exf->native_blocksize, + blksz = GMAC_BLOCK_LEN; + KASSERT(blksz == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); ivlen = AES_GCM_IV_LEN; - /* Supply MAC with IV */ - axf->Reinit(&ctx, crp->crp_iv, ivlen); + /* Supply cipher with nonce. */ + if (crp->crp_cipher_key != NULL) + exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, + crypto_get_params(crp->crp_session)->csp_cipher_klen); + exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) { len = rounddown(crp->crp_aad_length, blksz); if (len != 0) - axf->Update(&ctx, crp->crp_aad, len); + exf->update(swe->sw_kschedule, crp->crp_aad, len); if (crp->crp_aad_length != len) { memset(blk, 0, blksz); memcpy(blk, (char *)crp->crp_aad + len, crp->crp_aad_length - len); - axf->Update(&ctx, blk, blksz); + exf->update(swe->sw_kschedule, blk, blksz); } } else { crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_aad_start); for (resid = crp->crp_aad_length; resid >= blksz; resid -= len) { inblk = crypto_cursor_segment(&cc_in, &len); if (len >= blksz) { len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc_in, len); } else { len = blksz; crypto_cursor_copydata(&cc_in, len, blk); inblk = blk; } - axf->Update(&ctx, inblk, len); + exf->update(swe->sw_kschedule, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc_in, resid, blk); - axf->Update(&ctx, blk, blksz); + exf->update(swe->sw_kschedule, blk, blksz); } } - if (crp->crp_cipher_key != NULL) - exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, - crypto_get_params(crp->crp_session)->csp_cipher_klen); - exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); - /* Do encryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->encrypt(swe->sw_kschedule, inblk, outblk); - axf->Update(&ctx, outblk, blksz); + exf->update(swe->sw_kschedule, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { - axf->Update(&ctx, inblk, blksz); + 
exf->update(swe->sw_kschedule, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } - axf->Update(&ctx, blk, resid); + exf->update(swe->sw_kschedule, blk, resid); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); - axf->Update(&ctx, blk, blksz); + exf->update(swe->sw_kschedule, blk, blksz); /* Finalize MAC */ - axf->Final(tag, &ctx); + exf->final(tag, swe->sw_kschedule); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); return (error); } static void build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length, u_int data_length, u_int tag_length, uint8_t *b0) { uint8_t *bp; uint8_t flags, L; KASSERT(nonce_length >= 7 && nonce_length <= 13, ("nonce_length must be between 7 and 13 bytes")); /* * Need to determine the L field value. This is the number of * bytes needed to specify the length of the message; the length * is whatever is left in the 16 bytes after specifying flags and * the nonce. */ L = 15 - nonce_length; flags = ((aad_length > 0) << 6) + (((tag_length - 2) / 2) << 3) + L - 1; /* * Now we need to set up the first block, which has flags, nonce, * and the message length. */ b0[0] = flags; memcpy(b0 + 1, nonce, nonce_length); bp = b0 + 1 + nonce_length; /* Need to copy L' [aka L-1] bytes of data_length */ for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) { *dst = data_length; data_length >>= 8; } } /* NB: OCF only supports AAD lengths < 2^32. 
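 * Worked examples of the encoding below: aad_length = 24 fits the short
 * form and is emitted as the two bytes 00 18, while aad_length = 80000
 * (0x13880) exceeds 0xfeff and is emitted as the six bytes
 * ff fe 00 01 38 80.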
 */
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char iv[AES_BLOCK_LEN];
	u_char blk[CCM_CBC_BLOCK_LEN];
	u_char tag[AES_CBC_MAC_HASH_LEN];
	union authctx ctx;
	const struct crypto_session_params *csp;
	struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;
	crypto_read_iv(crp, iv);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, blk);
	axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN);

	len = build_ccm_aad_length(crp->crp_payload_length, blk);
	axf->Update(&ctx, blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &ctx);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
-	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
-	const struct auth_hash *axf;
	const struct enc_xform *exf;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
-	axf = swa->sw_axf;
-
-	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
-	blksz = AES_BLOCK_LEN;
-	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
-	    __func__));
-
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
-	KASSERT(axf->blocksize == exf->native_blocksize,
+	blksz = AES_BLOCK_LEN;
+	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

-	/* Supply MAC with IV */
-	axf->Reinit(&ctx, crp->crp_iv, ivlen);
+	if (crp->crp_cipher_key != NULL)
+		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
+		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
+	exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen);

	/* Supply MAC with b0.
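	 * For a 12-byte nonce, L = 15 - 12 = 3, so with AAD present and
	 * a 16-byte tag the flags octet is
	 * (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x7a.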
*/ _Static_assert(sizeof(blkbuf) >= CCM_CBC_BLOCK_LEN, "blkbuf too small for b0"); build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length, crp->crp_payload_length, swa->sw_mlen, blk); - axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN); + exf->update(swe->sw_kschedule, blk, CCM_CBC_BLOCK_LEN); /* Supply MAC with AAD */ if (crp->crp_aad_length != 0) { len = build_ccm_aad_length(crp->crp_aad_length, blk); - axf->Update(&ctx, blk, len); + exf->update(swe->sw_kschedule, blk, len); if (crp->crp_aad != NULL) - axf->Update(&ctx, crp->crp_aad, + exf->update(swe->sw_kschedule, crp->crp_aad, crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, - crp->crp_aad_length, axf->Update, &ctx); + crp->crp_aad_length, exf->update, + swe->sw_kschedule); /* Pad the AAD (including length field) to a full block. */ len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN; if (len != 0) { len = CCM_CBC_BLOCK_LEN - len; memset(blk, 0, CCM_CBC_BLOCK_LEN); - axf->Update(&ctx, blk, len); + exf->update(swe->sw_kschedule, blk, len); } } - if (crp->crp_cipher_key != NULL) - exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, - crypto_get_params(crp->crp_session)->csp_cipher_klen); - exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); - /* Do encryption/decryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; - axf->Update(&ctx, inblk, blksz); + exf->update(swe->sw_kschedule, inblk, blksz); exf->encrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unencrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified. 
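			 * The first pass below therefore only feeds the
			 * CBC-MAC; once the tag verifies, the payload is
			 * decrypted again (after an exf->reinit()) to
			 * produce the output.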
*/ exf->decrypt(swe->sw_kschedule, inblk, blk); - axf->Update(&ctx, blk, blksz); + exf->update(swe->sw_kschedule, blk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { - axf->Update(&ctx, blk, resid); + exf->update(swe->sw_kschedule, blk, resid); exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } else { exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); - axf->Update(&ctx, blk, resid); + exf->update(swe->sw_kschedule, blk, resid); } } /* Finalize MAC */ - axf->Final(tag, &ctx); + exf->final(tag, swe->sw_kschedule); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); return (error); } static int swcr_chacha20_poly1305(struct swcr_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[POLY1305_HASH_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; uint64_t *blkp; - union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; - const struct auth_hash *axf; const struct enc_xform *exf; size_t len; int blksz, error, r, resid; swa = &ses->swcr_auth; - axf = swa->sw_axf; - swe = &ses->swcr_encdec; exf = swe->sw_exf; blksz = exf->native_blocksize; KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); csp = crypto_get_params(crp->crp_session); - /* Generate Poly1305 key. 
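	 * With the AEAD xform this now happens inside exf->reinit(): the
	 * chacha20-poly1305 transform derives the one-time Poly1305 key
	 * from the ChaCha20 key and nonce, so no separate axf
	 * Setkey()/Reinit() step is needed.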
*/ if (crp->crp_cipher_key != NULL) - axf->Setkey(&ctx, crp->crp_cipher_key, csp->csp_cipher_klen); - else - axf->Setkey(&ctx, csp->csp_cipher_key, csp->csp_cipher_klen); - axf->Reinit(&ctx, crp->crp_iv, csp->csp_ivlen); + exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, + csp->csp_cipher_klen); + exf->reinit(swe->sw_kschedule, crp->crp_iv, csp->csp_ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) - axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); + exf->update(swe->sw_kschedule, crp->crp_aad, + crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, - crp->crp_aad_length, axf->Update, &ctx); + crp->crp_aad_length, exf->update, swe->sw_kschedule); if (crp->crp_aad_length % 16 != 0) { /* padding1 */ memset(blk, 0, 16); - axf->Update(&ctx, blk, 16 - crp->crp_aad_length % 16); + exf->update(swe->sw_kschedule, blk, + 16 - crp->crp_aad_length % 16); } - if (crp->crp_cipher_key != NULL) - exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, - csp->csp_cipher_klen); - exf->reinit(swe->sw_kschedule, crp->crp_iv, csp->csp_ivlen); - /* Do encryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->encrypt(swe->sw_kschedule, inblk, outblk); - axf->Update(&ctx, outblk, blksz); + exf->update(swe->sw_kschedule, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { - axf->Update(&ctx, inblk, blksz); + exf->update(swe->sw_kschedule, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } - axf->Update(&ctx, blk, resid); + exf->update(swe->sw_kschedule, blk, resid); if (resid % 16 != 0) { /* padding2 */ memset(blk, 0, 16); - axf->Update(&ctx, blk, 16 - resid % 16); + exf->update(swe->sw_kschedule, blk, 16 - resid % 16); } } /* lengths */ blkp = (uint64_t *)blk; blkp[0] = htole64(crp->crp_aad_length); blkp[1] = htole64(crp->crp_payload_length); - axf->Update(&ctx, blk, sizeof(uint64_t) * 2); + exf->update(swe->sw_kschedule, blk, sizeof(uint64_t) * 2); /* Finalize MAC */ - axf->Final(tag, &ctx); + exf->final(tag, swe->sw_kschedule); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[POLY1305_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; 
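/* Bounce through the local block when either segment is short. */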
exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); - explicit_bzero(&ctx, sizeof(ctx)); return (error); } /* * Apply a cipher and a digest to perform EtA. */ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm. */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { const struct comp_algo *cxf; uint8_t *data, *out; int adj; uint32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must process the whole buffer of data at once, so if * the data is not contiguous in the mbuf, we must copy it * into a temporary buffer. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless; we only lost time. */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback extends * the mbuf as necessary. 
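* If the result is shorter than the original payload, the buffer is * trimmed below according to its type.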
*/ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: case CRYPTO_BUF_SINGLE_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_buf.cb_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_buf.cb_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; case CRYPTO_BUF_VMPAGE: adj = crp->crp_payload_length - result; crp->crp_buf.cb_vm_page_len -= adj; break; default: break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_cipher(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; const struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); if (txf->ctxsize != 0) { swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swe->sw_kschedule == NULL) return (ENOMEM); } if (csp->csp_cipher_key != NULL) { error = txf->setkey(swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; const struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. */ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int -swcr_setup_gcm(struct swcr_session *ses, - const struct crypto_session_params *csp) -{ - struct swcr_auth *swa; - const struct auth_hash *axf; - - /* First, setup the auth side. 
*/ - swa = &ses->swcr_auth; - switch (csp->csp_cipher_klen * 8) { - case 128: - axf = &auth_hash_nist_gmac_aes_128; - break; - case 192: - axf = &auth_hash_nist_gmac_aes_192; - break; - case 256: - axf = &auth_hash_nist_gmac_aes_256; - break; - default: - return (EINVAL); - } - swa->sw_axf = axf; - if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) - return (EINVAL); - if (csp->csp_auth_mlen == 0) - swa->sw_mlen = axf->hashsize; - else - swa->sw_mlen = csp->csp_auth_mlen; - swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); - if (swa->sw_ictx == NULL) - return (ENOBUFS); - axf->Init(swa->sw_ictx); - if (csp->csp_cipher_key != NULL) - axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, - csp->csp_cipher_klen); - - /* Second, setup the cipher side. */ - return (swcr_setup_cipher(ses, csp)); -} - -static int -swcr_setup_ccm(struct swcr_session *ses, +swcr_setup_aead(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; - const struct auth_hash *axf; - - /* First, setup the auth side. */ - swa = &ses->swcr_auth; - switch (csp->csp_cipher_klen * 8) { - case 128: - axf = &auth_hash_ccm_cbc_mac_128; - break; - case 192: - axf = &auth_hash_ccm_cbc_mac_192; - break; - case 256: - axf = &auth_hash_ccm_cbc_mac_256; - break; - default: - return (EINVAL); - } - swa->sw_axf = axf; - if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) - return (EINVAL); - if (csp->csp_auth_mlen == 0) - swa->sw_mlen = axf->hashsize; - else - swa->sw_mlen = csp->csp_auth_mlen; - swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); - if (swa->sw_ictx == NULL) - return (ENOBUFS); - axf->Init(swa->sw_ictx); - if (csp->csp_cipher_key != NULL) - axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, - csp->csp_cipher_klen); - - /* Second, setup the cipher side. */ - return (swcr_setup_cipher(ses, csp)); -} + int error; -static int -swcr_setup_chacha20_poly1305(struct swcr_session *ses, - const struct crypto_session_params *csp) -{ - struct swcr_auth *swa; - const struct auth_hash *axf; + error = swcr_setup_cipher(ses, csp); + if (error) + return (error); - /* First, setup the auth side. */ swa = &ses->swcr_auth; - axf = &auth_hash_chacha20_poly1305; - swa->sw_axf = axf; - if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) - return (EINVAL); if (csp->csp_auth_mlen == 0) - swa->sw_mlen = axf->hashsize; + swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize; else swa->sw_mlen = csp->csp_auth_mlen; - - /* The auth state is regenerated for each nonce. */ - - /* Second, setup the cipher side. 
*/ - return (swcr_setup_cipher(ses, csp)); + return (0); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { const struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { const struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (EINVAL); } break; case CRYPTO_CHACHA20_POLY1305: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. 
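* Each supported mode installs the matching swcr_process handler on * the session.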
*/ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; const struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: panic("bad cipher algo"); #endif default: error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: - error = swcr_setup_gcm(ses, csp); + error = swcr_setup_aead(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: - error = swcr_setup_ccm(ses, csp); + error = swcr_setup_aead(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; case CRYPTO_CHACHA20_POLY1305: - error = swcr_setup_chacha20_poly1305(ses, csp); + error = swcr_setup_aead(ses, csp); if (error == 0) ses->swcr_process = swcr_chacha20_poly1305; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA); } /* * Process a software request. 
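* Requests on a session are serialized by swcr_lock and completed * synchronously by the handler installed at session creation.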
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); device_quiet(dev); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); diff --git a/sys/opencrypto/xform_aes_icm.c b/sys/opencrypto/xform_aes_icm.c index 09880ee426b8..c33839d8a931 100644 --- a/sys/opencrypto/xform_aes_icm.c +++ b/sys/opencrypto/xform_aes_icm.c @@ -1,199 +1,285 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). 
* * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); +#include +#include #include +struct aes_gcm_ctx { + struct aes_icm_ctx cipher; + struct aes_gmac_ctx gmac; +}; + +struct aes_ccm_ctx { + struct aes_icm_ctx cipher; + struct aes_cbc_mac_ctx cbc_mac; +}; + static int aes_icm_setkey(void *, const uint8_t *, int); static void aes_icm_crypt(void *, const uint8_t *, uint8_t *); static void aes_icm_crypt_last(void *, const uint8_t *, uint8_t *, size_t); static void aes_icm_reinit(void *, const uint8_t *, size_t); +static int aes_gcm_setkey(void *, const uint8_t *, int); static void aes_gcm_reinit(void *, const uint8_t *, size_t); +static int aes_gcm_update(void *, const void *, u_int); +static void aes_gcm_final(uint8_t *, void *); +static int aes_ccm_setkey(void *, const uint8_t *, int); static void aes_ccm_reinit(void *, const uint8_t *, size_t); +static int aes_ccm_update(void *, const void *, u_int); +static void aes_ccm_final(uint8_t *, void *); /* Encryption instances */ const struct enc_xform enc_xform_aes_icm = { .type = CRYPTO_AES_ICM, .name = "AES-ICM", .ctxsize = sizeof(struct aes_icm_ctx), .blocksize = 1, .native_blocksize = AES_BLOCK_LEN, .ivsize = AES_BLOCK_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, .setkey = aes_icm_setkey, .reinit = aes_icm_reinit, .encrypt_last = aes_icm_crypt_last, .decrypt_last = aes_icm_crypt_last, }; const struct enc_xform enc_xform_aes_nist_gcm = { .type = CRYPTO_AES_NIST_GCM_16, .name = "AES-GCM", - .ctxsize = sizeof(struct aes_icm_ctx), + .ctxsize = sizeof(struct aes_gcm_ctx), .blocksize = 1, .native_blocksize = AES_BLOCK_LEN, .ivsize = AES_GCM_IV_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, + .macsize = AES_GMAC_HASH_LEN, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, - .setkey = aes_icm_setkey, + .setkey = aes_gcm_setkey, .reinit = aes_gcm_reinit, .encrypt_last = aes_icm_crypt_last, .decrypt_last = aes_icm_crypt_last, + .update = aes_gcm_update, + .final = aes_gcm_final, }; const struct enc_xform enc_xform_ccm = { .type = CRYPTO_AES_CCM_16, .name = "AES-CCM", - .ctxsize = sizeof(struct aes_icm_ctx), + .ctxsize = sizeof(struct aes_ccm_ctx), .blocksize = 1, .native_blocksize = AES_BLOCK_LEN, .ivsize = AES_CCM_IV_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, + .macsize = AES_CBC_MAC_HASH_LEN, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, - .setkey = aes_icm_setkey, + .setkey = aes_ccm_setkey, .reinit = aes_ccm_reinit, .encrypt_last = aes_icm_crypt_last, .decrypt_last = aes_icm_crypt_last, + .update = aes_ccm_update, + .final = aes_ccm_final, }; /* * Encryption wrapper routines. 
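* The GCM and CCM transforms below wrap composite contexts that pair * the ICM cipher state with the corresponding MAC state.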
*/ static void aes_icm_reinit(void *key, const uint8_t *iv, size_t ivlen) { struct aes_icm_ctx *ctx; ctx = key; KASSERT(ivlen <= sizeof(ctx->ac_block), ("%s: ivlen too large", __func__)); bcopy(iv, ctx->ac_block, ivlen); } static void -aes_gcm_reinit(void *key, const uint8_t *iv, size_t ivlen) +aes_gcm_reinit(void *vctx, const uint8_t *iv, size_t ivlen) { - struct aes_icm_ctx *ctx; + struct aes_gcm_ctx *ctx = vctx; KASSERT(ivlen == AES_GCM_IV_LEN, ("%s: invalid IV length", __func__)); - aes_icm_reinit(key, iv, ivlen); + aes_icm_reinit(&ctx->cipher, iv, ivlen); - ctx = key; /* GCM starts with 2 as counter 1 is used for final xor of tag. */ - bzero(&ctx->ac_block[AESICM_BLOCKSIZE - 4], 4); - ctx->ac_block[AESICM_BLOCKSIZE - 1] = 2; + bzero(&ctx->cipher.ac_block[AESICM_BLOCKSIZE - 4], 4); + ctx->cipher.ac_block[AESICM_BLOCKSIZE - 1] = 2; + + AES_GMAC_Reinit(&ctx->gmac, iv, ivlen); } static void -aes_ccm_reinit(void *key, const uint8_t *iv, size_t ivlen) +aes_ccm_reinit(void *vctx, const uint8_t *iv, size_t ivlen) { - struct aes_icm_ctx *ctx; + struct aes_ccm_ctx *ctx = vctx; KASSERT(ivlen >= 7 && ivlen <= 13, ("%s: invalid IV length", __func__)); - ctx = key; /* CCM has flags, then the IV, then the counter, which starts at 1 */ - bzero(ctx->ac_block, sizeof(ctx->ac_block)); - ctx->ac_block[0] = (15 - ivlen) - 1; - bcopy(iv, ctx->ac_block + 1, ivlen); - ctx->ac_block[AESICM_BLOCKSIZE - 1] = 1; + bzero(ctx->cipher.ac_block, sizeof(ctx->cipher.ac_block)); + ctx->cipher.ac_block[0] = (15 - ivlen) - 1; + bcopy(iv, ctx->cipher.ac_block + 1, ivlen); + ctx->cipher.ac_block[AESICM_BLOCKSIZE - 1] = 1; + + AES_CBC_MAC_Reinit(&ctx->cbc_mac, iv, ivlen); } static void aes_icm_crypt(void *key, const uint8_t *in, uint8_t *out) { struct aes_icm_ctx *ctx; int i; ctx = key; aes_icm_crypt_last(key, in, out, AESICM_BLOCKSIZE); /* increment counter */ for (i = AESICM_BLOCKSIZE - 1; i >= 0; i--) if (++ctx->ac_block[i]) /* continue on overflow */ break; } static void aes_icm_crypt_last(void *key, const uint8_t *in, uint8_t *out, size_t len) { struct aes_icm_ctx *ctx; uint8_t keystream[AESICM_BLOCKSIZE]; int i; ctx = key; rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream); for (i = 0; i < len; i++) out[i] = in[i] ^ keystream[i]; explicit_bzero(keystream, sizeof(keystream)); } static int aes_icm_setkey(void *sched, const uint8_t *key, int len) { struct aes_icm_ctx *ctx; if (len != 16 && len != 24 && len != 32) return (EINVAL); ctx = sched; ctx->ac_nr = rijndaelKeySetupEnc(ctx->ac_ek, key, len * 8); return (0); } + +static int +aes_gcm_setkey(void *vctx, const uint8_t *key, int len) +{ + struct aes_gcm_ctx *ctx = vctx; + int error; + + error = aes_icm_setkey(&ctx->cipher, key, len); + if (error != 0) + return (error); + + AES_GMAC_Setkey(&ctx->gmac, key, len); + return (0); +} + +static int +aes_ccm_setkey(void *vctx, const uint8_t *key, int len) +{ + struct aes_ccm_ctx *ctx = vctx; + int error; + + error = aes_icm_setkey(&ctx->cipher, key, len); + if (error != 0) + return (error); + + AES_CBC_MAC_Setkey(&ctx->cbc_mac, key, len); + return (0); +} + +static int +aes_gcm_update(void *vctx, const void *buf, u_int len) +{ + struct aes_gcm_ctx *ctx = vctx; + + return (AES_GMAC_Update(&ctx->gmac, buf, len)); +} + +static int +aes_ccm_update(void *vctx, const void *buf, u_int len) +{ + struct aes_ccm_ctx *ctx = vctx; + + return (AES_CBC_MAC_Update(&ctx->cbc_mac, buf, len)); +} + +static void +aes_gcm_final(uint8_t *tag, void *vctx) +{ + struct aes_gcm_ctx *ctx = vctx; + + AES_GMAC_Final(tag, &ctx->gmac); +} + 
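+/* As with GCM above, but finalizing the CBC-MAC half of the context. */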
+static void +aes_ccm_final(uint8_t *tag, void *vctx) +{ + struct aes_ccm_ctx *ctx = vctx; + + AES_CBC_MAC_Final(tag, &ctx->cbc_mac); +} diff --git a/sys/opencrypto/xform_auth.h b/sys/opencrypto/xform_auth.h index aa2f55564c5f..b5fd2efdb3b4 100644 --- a/sys/opencrypto/xform_auth.h +++ b/sys/opencrypto/xform_auth.h @@ -1,100 +1,99 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #ifndef _CRYPTO_XFORM_AUTH_H_ #define _CRYPTO_XFORM_AUTH_H_ #include #include #include #include #include #include #include #include #include #include #include /* XXX use a define common with other hash stuff ! */ #define AH_ALEN_MAX 64 /* max authenticator hash length */ /* Declarations */ struct auth_hash { int type; const char *name; uint16_t keysize; uint16_t hashsize; uint16_t ctxsize; uint16_t blocksize; void (*Init) (void *); void (*Setkey) (void *, const uint8_t *, u_int); void (*Reinit) (void *, const uint8_t *, u_int); int (*Update) (void *, const void *, u_int); void (*Final) (uint8_t *, void *); }; extern const struct auth_hash auth_hash_null; extern const struct auth_hash auth_hash_hmac_sha1; extern const struct auth_hash auth_hash_hmac_ripemd_160; extern const struct auth_hash auth_hash_hmac_sha2_224; extern const struct auth_hash auth_hash_hmac_sha2_256; extern const struct auth_hash auth_hash_hmac_sha2_384; extern const struct auth_hash auth_hash_hmac_sha2_512; extern const struct auth_hash auth_hash_sha1; extern const struct auth_hash auth_hash_sha2_224; extern const struct auth_hash auth_hash_sha2_256; extern const struct auth_hash auth_hash_sha2_384; extern const struct auth_hash auth_hash_sha2_512; extern const struct auth_hash auth_hash_nist_gmac_aes_128; extern const struct auth_hash auth_hash_nist_gmac_aes_192; extern const struct auth_hash auth_hash_nist_gmac_aes_256; extern const struct auth_hash auth_hash_blake2b; extern const struct auth_hash auth_hash_blake2s; extern const struct auth_hash auth_hash_poly1305; extern const struct auth_hash auth_hash_ccm_cbc_mac_128; extern const struct auth_hash auth_hash_ccm_cbc_mac_192; extern const struct auth_hash auth_hash_ccm_cbc_mac_256; -extern const struct auth_hash auth_hash_chacha20_poly1305; union authctx { SHA1_CTX sha1ctx; RMD160_CTX rmd160ctx; SHA224_CTX sha224ctx; SHA256_CTX sha256ctx; SHA384_CTX sha384ctx; SHA512_CTX sha512ctx; struct aes_gmac_ctx aes_gmac_ctx; struct aes_cbc_mac_ctx aes_cbc_mac_ctx; }; #endif /* _CRYPTO_XFORM_AUTH_H_ */ diff --git a/sys/opencrypto/xform_chacha20_poly1305.c 
b/sys/opencrypto/xform_chacha20_poly1305.c index d2ddf6a410c9..cb1fc4cea88d 100644 --- a/sys/opencrypto/xform_chacha20_poly1305.c +++ b/sys/opencrypto/xform_chacha20_poly1305.c @@ -1,185 +1,146 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Netflix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -struct chacha20_poly1305_cipher_ctx { +struct chacha20_poly1305_ctx { + struct crypto_onetimeauth_poly1305_state auth; const void *key; uint32_t ic; bool ietf; char nonce[CHACHA20_POLY1305_IV_LEN]; }; static int chacha20_poly1305_setkey(void *vctx, const uint8_t *key, int len) { - struct chacha20_poly1305_cipher_ctx *ctx = vctx; + struct chacha20_poly1305_ctx *ctx = vctx; if (len != CHACHA20_POLY1305_KEY) return (EINVAL); ctx->key = key; return (0); } static void chacha20_poly1305_reinit(void *vctx, const uint8_t *iv, size_t ivlen) { - struct chacha20_poly1305_cipher_ctx *ctx = vctx; + struct chacha20_poly1305_ctx *ctx = vctx; + char block[CHACHA20_NATIVE_BLOCK_LEN]; KASSERT(ivlen == 8 || ivlen == sizeof(ctx->nonce), ("%s: invalid nonce length", __func__)); - /* Block 0 is used for the poly1305 key. */ memcpy(ctx->nonce, iv, ivlen); ctx->ietf = (ivlen == CHACHA20_POLY1305_IV_LEN); + + /* Block 0 is used for the poly1305 key. */ + if (ctx->ietf) + crypto_stream_chacha20_ietf(block, sizeof(block), iv, ctx->key); + else + crypto_stream_chacha20(block, sizeof(block), iv, ctx->key); + crypto_onetimeauth_poly1305_init(&ctx->auth, block); + explicit_bzero(block, sizeof(block)); + + /* Start with block 1 for ciphertext. 
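+ * (block 0 keyed the Poly1305 authenticator above, per RFC 8439).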
*/ ctx->ic = 1; } static void chacha20_poly1305_crypt(void *vctx, const uint8_t *in, uint8_t *out) { - struct chacha20_poly1305_cipher_ctx *ctx = vctx; + struct chacha20_poly1305_ctx *ctx = vctx; int error __diagused; if (ctx->ietf) error = crypto_stream_chacha20_ietf_xor_ic(out, in, CHACHA20_NATIVE_BLOCK_LEN, ctx->nonce, ctx->ic, ctx->key); else error = crypto_stream_chacha20_xor_ic(out, in, CHACHA20_NATIVE_BLOCK_LEN, ctx->nonce, ctx->ic, ctx->key); KASSERT(error == 0, ("%s failed: %d", __func__, error)); ctx->ic++; } static void chacha20_poly1305_crypt_last(void *vctx, const uint8_t *in, uint8_t *out, size_t len) { - struct chacha20_poly1305_cipher_ctx *ctx = vctx; + struct chacha20_poly1305_ctx *ctx = vctx; int error __diagused; if (ctx->ietf) error = crypto_stream_chacha20_ietf_xor_ic(out, in, len, ctx->nonce, ctx->ic, ctx->key); else error = crypto_stream_chacha20_xor_ic(out, in, len, ctx->nonce, ctx->ic, ctx->key); KASSERT(error == 0, ("%s failed: %d", __func__, error)); } +static int +chacha20_poly1305_update(void *vctx, const void *data, u_int len) +{ + struct chacha20_poly1305_ctx *ctx = vctx; + + crypto_onetimeauth_poly1305_update(&ctx->auth, data, len); + return (0); +} + +static void +chacha20_poly1305_final(uint8_t *digest, void *vctx) +{ + struct chacha20_poly1305_ctx *ctx = vctx; + + crypto_onetimeauth_poly1305_final(&ctx->auth, digest); +} + const struct enc_xform enc_xform_chacha20_poly1305 = { .type = CRYPTO_CHACHA20_POLY1305, .name = "ChaCha20-Poly1305", - .ctxsize = sizeof(struct chacha20_poly1305_cipher_ctx), + .ctxsize = sizeof(struct chacha20_poly1305_ctx), .blocksize = 1, .native_blocksize = CHACHA20_NATIVE_BLOCK_LEN, .ivsize = CHACHA20_POLY1305_IV_LEN, .minkey = CHACHA20_POLY1305_KEY, .maxkey = CHACHA20_POLY1305_KEY, + .macsize = POLY1305_HASH_LEN, .encrypt = chacha20_poly1305_crypt, .decrypt = chacha20_poly1305_crypt, .setkey = chacha20_poly1305_setkey, .reinit = chacha20_poly1305_reinit, .encrypt_last = chacha20_poly1305_crypt_last, .decrypt_last = chacha20_poly1305_crypt_last, -}; - -struct chacha20_poly1305_auth_ctx { - struct crypto_onetimeauth_poly1305_state state; - const void *key; -}; -CTASSERT(sizeof(union authctx) >= sizeof(struct chacha20_poly1305_auth_ctx)); - -static void -chacha20_poly1305_Init(void *vctx) -{ -} - -static void -chacha20_poly1305_Setkey(void *vctx, const uint8_t *key, u_int klen) -{ - struct chacha20_poly1305_auth_ctx *ctx = vctx; - - ctx->key = key; -} - -static void -chacha20_poly1305_Reinit(void *vctx, const uint8_t *nonce, u_int noncelen) -{ - struct chacha20_poly1305_auth_ctx *ctx = vctx; - char block[CHACHA20_NATIVE_BLOCK_LEN]; - - switch (noncelen) { - case 8: - crypto_stream_chacha20(block, sizeof(block), nonce, ctx->key); - break; - case CHACHA20_POLY1305_IV_LEN: - crypto_stream_chacha20_ietf(block, sizeof(block), nonce, ctx->key); - break; - default: - __assert_unreachable(); - } - crypto_onetimeauth_poly1305_init(&ctx->state, block); - explicit_bzero(block, sizeof(block)); -} - -static int -chacha20_poly1305_Update(void *vctx, const void *data, u_int len) -{ - struct chacha20_poly1305_auth_ctx *ctx = vctx; - - crypto_onetimeauth_poly1305_update(&ctx->state, data, len); - return (0); -} - -static void -chacha20_poly1305_Final(uint8_t *digest, void *vctx) -{ - struct chacha20_poly1305_auth_ctx *ctx = vctx; - - crypto_onetimeauth_poly1305_final(&ctx->state, digest); -} - -const struct auth_hash auth_hash_chacha20_poly1305 = { - .type = CRYPTO_POLY1305, - .name = "ChaCha20-Poly1305", - .keysize = POLY1305_KEY_LEN, - .hashsize = 
POLY1305_HASH_LEN, - .ctxsize = sizeof(struct chacha20_poly1305_auth_ctx), - .blocksize = crypto_onetimeauth_poly1305_BYTES, - .Init = chacha20_poly1305_Init, - .Setkey = chacha20_poly1305_Setkey, - .Reinit = chacha20_poly1305_Reinit, - .Update = chacha20_poly1305_Update, - .Final = chacha20_poly1305_Final, + .update = chacha20_poly1305_update, + .final = chacha20_poly1305_final, }; diff --git a/sys/opencrypto/xform_enc.h b/sys/opencrypto/xform_enc.h index baf423dd1079..1912e6900481 100644 --- a/sys/opencrypto/xform_enc.h +++ b/sys/opencrypto/xform_enc.h @@ -1,100 +1,107 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #ifndef _CRYPTO_XFORM_ENC_H_ #define _CRYPTO_XFORM_ENC_H_ #include #include #include #include #include #ifdef _STANDALONE #include #endif #define AESICM_BLOCKSIZE AES_BLOCK_LEN #define AES_XTS_BLOCKSIZE 16 #define AES_XTS_IVSIZE 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Declarations */ struct enc_xform { int type; const char *name; size_t ctxsize; uint16_t blocksize; /* Required input block size -- 1 for stream ciphers. */ uint16_t native_blocksize; /* Used for stream ciphers. */ uint16_t ivsize; uint16_t minkey, maxkey; + uint16_t macsize; /* For AEAD ciphers. */ /* * Encrypt/decrypt a single block. For stream ciphers this * encrypts/decrypts a single "native" block. */ void (*encrypt) (void *, const uint8_t *, uint8_t *); void (*decrypt) (void *, const uint8_t *, uint8_t *); int (*setkey) (void *, const uint8_t *, int len); void (*reinit) (void *, const uint8_t *, size_t); /* * For stream ciphers, encrypt/decrypt the final partial block * of 'len' bytes. */ void (*encrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); void (*decrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); + + /* + * For AEAD ciphers, update and generate MAC/tag. 
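+ * 'update' absorbs AAD and ciphertext into the MAC state after + * 'setkey'/'reinit'; 'final' emits a tag of 'macsize' bytes.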
+ */ + int (*update) (void *, const void *, u_int); + void (*final) (uint8_t *, void *); }; extern const struct enc_xform enc_xform_null; extern const struct enc_xform enc_xform_rijndael128; extern const struct enc_xform enc_xform_aes_icm; extern const struct enc_xform enc_xform_aes_nist_gcm; extern const struct enc_xform enc_xform_aes_nist_gmac; extern const struct enc_xform enc_xform_aes_xts; extern const struct enc_xform enc_xform_camellia; extern const struct enc_xform enc_xform_chacha20; extern const struct enc_xform enc_xform_chacha20_poly1305; extern const struct enc_xform enc_xform_ccm; struct aes_icm_ctx { uint32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)]; /* ac_block is initialized to IV */ uint8_t ac_block[AESICM_BLOCKSIZE]; int ac_nr; }; struct aes_xts_ctx { rijndael_ctx key1; rijndael_ctx key2; uint8_t tweak[AES_XTS_BLOCKSIZE]; }; #endif /* _CRYPTO_XFORM_ENC_H_ */
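/*
 * Illustrative sketch, not part of the change above: one way a caller
 * could drive the consolidated enc_xform AEAD interface for
 * ChaCha20-Poly1305, mirroring the swcr_chacha20_poly1305() flow
 * (reinit, MAC the padded AAD, encrypt while MACing the ciphertext,
 * MAC the length block, emit the tag). The helper name and the flat,
 * contiguous buffers are hypothetical; cryptosoft itself walks
 * crypto_buffer cursors. 'ctx' is assumed to be exf->ctxsize bytes,
 * already keyed via exf->setkey(). The Poly1305 update calls cannot
 * fail, so their return values are ignored as in the driver above.
 */
#include <sys/types.h>
#include <sys/endian.h>

#include <opencrypto/xform_enc.h>

static void
example_chacha20_poly1305_seal(void *ctx, const uint8_t *iv,
    const uint8_t *aad, u_int aadlen, const uint8_t *pt, u_int ptlen,
    uint8_t *ct, uint8_t *tag)
{
	const struct enc_xform *exf = &enc_xform_chacha20_poly1305;
	static const uint8_t pad[16];		/* always zero */
	uint64_t lens[2];
	u_int i;

	/* Load the nonce and derive the one-time Poly1305 key. */
	exf->reinit(ctx, iv, exf->ivsize);

	/* MAC the AAD, zero-padded to a 16-byte boundary (padding1). */
	exf->update(ctx, aad, aadlen);
	if (aadlen % 16 != 0)
		exf->update(ctx, pad, 16 - aadlen % 16);

	/* Encrypt; the MAC always sees the ciphertext. */
	for (i = 0; i + exf->native_blocksize <= ptlen;
	    i += exf->native_blocksize) {
		exf->encrypt(ctx, pt + i, ct + i);
		exf->update(ctx, ct + i, exf->native_blocksize);
	}
	if (i < ptlen) {
		exf->encrypt_last(ctx, pt + i, ct + i, ptlen - i);
		exf->update(ctx, ct + i, ptlen - i);
	}
	if (ptlen % 16 != 0)			/* padding2 */
		exf->update(ctx, pad, 16 - ptlen % 16);

	/* Little-endian byte lengths, then the exf->macsize-byte tag. */
	lens[0] = htole64(aadlen);
	lens[1] = htole64(ptlen);
	exf->update(ctx, lens, sizeof(lens));
	exf->final(tag, ctx);
}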