Index: head/share/man/man7/crypto.7 =================================================================== --- head/share/man/man7/crypto.7 (revision 361389) +++ head/share/man/man7/crypto.7 (revision 361390) @@ -1,124 +1,124 @@ .\" Copyright (c) 2014 The FreeBSD Foundation .\" All rights reserved. .\" .\" This documentation was written by John-Mark Gurney under .\" the sponsorship of the FreeBSD Foundation and .\" Rubicon Communications, LLC (Netgate). .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd May 3, 2020 +.Dd May 22, 2020 .Dt CRYPTO 7 .Os .Sh NAME .Nm crypto .Nd OpenCrypto algorithms .Sh SYNOPSIS In the kernel configuration file: .Cd "device crypto" .Pp Or load the crypto.ko module. .Sh DESCRIPTION The cryptographic algorithms that are part of the OpenCrypto framework have the following requirements. .Pp Cipher algorithms: .Bl -tag -width ".Dv CRYPTO_AES_CBC" .It Dv CRYPTO_AES_CBC .Bl -tag -width "Block size :" -compact -offset indent .It IV size : 16 .It Block size : 16 .It Key size : 16, 24 or 32 .El .Pp This algorithm implements Cipher-block chaining. .It Dv CRYPTO_AES_NIST_GCM_16 .Bl -tag -width "Block size :" -compact -offset indent .It IV size : 12 .It Block size : 1 .It Key size : 16, 24 or 32 .It Digest size : 16 .El .Pp This algorithm implements Galois/Counter Mode. This cipher uses AEAD .Pq Authenticated Encryption with Associated Data mode. .Pp The authentication tag will be read/written from/to the offset .Va crp_digest_start specified in the request. .Pp Note: You must provide an IV on every call. .It Dv CRYPTO_AES_ICM .Bl -tag -width "Block size :" -compact -offset indent .It IV size : 16 .It Block size : -1 (aesni), 16 (software) +1 .It Key size : 16, 24 or 32 .El .Pp This algorithm implements Integer Counter Mode. This is similar to what most people call counter mode, but instead of the counter being split into a nonce and a counter part, the entire nonce is used as the initial counter. This does mean that if a counter is required that rolls over at 32 bits, the transaction needs to be split into two parts where the counter rolls over. The counter is incremented as a 128-bit big-endian number. .Pp Note: You must provide an IV on every call.
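As a standalone illustration of the counter behaviour described above (a minimal sketch; ctr128_increment is a hypothetical name, not part of the OpenCrypto API), the whole 16-byte block is treated as one big-endian integer, matching the increment loop in aes_icm_crypt() further below:

#include <stdint.h>

/* Bump a 16-byte counter block as a single 128-bit big-endian value. */
static void
ctr128_increment(uint8_t ctr[16])
{
	int i;

	/* Add one to the last byte; keep carrying while a byte wraps to 0. */
	for (i = 15; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}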
.It Dv CRYPTO_AES_XTS .Bl -tag -width "Block size :" -compact -offset indent .It IV size : 8 .It Block size : 16 .It Key size : 32 or 64 .El .Pp This algorithm implements XEX Tweakable Block Cipher with Ciphertext Stealing as defined in NIST SP 800-38E. .Pp NOTE: The ciphertext stealing part is not implemented, which is why this cipher is listed as having a block size of 16 instead of 1. .El .Sh HISTORY The .Nm manpage first appeared in .Fx 10.1 . .Sh SEE ALSO .Xr crypto 4 , .Xr crypto 9 .Sh BUGS Not all the implemented algorithms are listed. Index: head/sys/crypto/chacha20/chacha-sw.c =================================================================== --- head/sys/crypto/chacha20/chacha-sw.c (revision 361389) +++ head/sys/crypto/chacha20/chacha-sw.c (revision 361390) @@ -1,56 +1,57 @@ /* This file is in the public domain. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <crypto/chacha20/chacha.h> #include <opencrypto/xform_enc.h> static int chacha20_xform_setkey(void *ctx, const uint8_t *key, int len) { if (len != CHACHA_MINKEYLEN && len != 32) return (EINVAL); chacha_keysetup(ctx, key, len * 8); return (0); } static void chacha20_xform_reinit(void *ctx, const u_int8_t *iv) { chacha_ivsetup(ctx, iv + 8, iv); } static void chacha20_xform_crypt(void *ctx, const uint8_t *in, uint8_t *out) { - chacha_encrypt_bytes(ctx, in, out, 1); + chacha_encrypt_bytes(ctx, in, out, CHACHA_BLOCKLEN); } static void -chacha20_xform_crypt_multi(void *ctx, const uint8_t *in, uint8_t *out, +chacha20_xform_crypt_last(void *ctx, const uint8_t *in, uint8_t *out, size_t len) { chacha_encrypt_bytes(ctx, in, out, len); } struct enc_xform enc_xform_chacha20 = { .type = CRYPTO_CHACHA20, .name = "chacha20", .ctxsize = sizeof(struct chacha_ctx), .blocksize = 1, + .native_blocksize = CHACHA_BLOCKLEN, .ivsize = CHACHA_NONCELEN + CHACHA_CTRLEN, .minkey = CHACHA_MINKEYLEN, .maxkey = 32, .encrypt = chacha20_xform_crypt, .decrypt = chacha20_xform_crypt, .setkey = chacha20_xform_setkey, .reinit = chacha20_xform_reinit, - .encrypt_multi = chacha20_xform_crypt_multi, - .decrypt_multi = chacha20_xform_crypt_multi, + .encrypt_last = chacha20_xform_crypt_last, + .decrypt_last = chacha20_xform_crypt_last, }; Index: head/sys/opencrypto/cryptosoft.c =================================================================== --- head/sys/opencrypto/cryptosoft.c (revision 361389) +++ head/sys/opencrypto/cryptosoft.c (revision 361390) @@ -1,1402 +1,1416 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY.
IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; struct auth_hash *sw_axf; uint16_t sw_mlen; }; struct swcr_encdec { void *sw_kschedule; struct enc_xform *sw_exf; }; struct swcr_compdec { struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. */ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; int i, j, k, blks, ind, count, ivlen; struct uio *uio, uiolcl; struct iovec iovlcl[4]; struct iovec *iov; int iovcnt, iovalloc; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; - blks = exf->blocksize; ivlen = exf->ivsize; - /* Check for non-padded data */ - if ((crp->crp_payload_length % blks) != 0) - return EINVAL; + if (exf->native_blocksize == 0) { + /* Check for non-padded data */ + if ((crp->crp_payload_length % exf->blocksize) != 0) + return (EINVAL); + blks = exf->blocksize; + } else + blks = exf->native_blocksize; + if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); crypto_read_iv(crp, iv); if (crp->crp_cipher_key != NULL) { csp = crypto_get_params(crp->crp_session); error = exf->setkey(sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } iov = iovlcl; iovcnt = nitems(iovlcl); iovalloc = 0; uio = &uiolcl; switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt, &iovalloc); if (error) return (error); uio->uio_iov = iov; uio->uio_iovcnt = iovcnt; break; case CRYPTO_BUF_UIO: uio = crp->crp_uio; break; case CRYPTO_BUF_CONTIG: iov[0].iov_base = crp->crp_buf; iov[0].iov_len = crp->crp_ilen; uio->uio_iov = iov; uio->uio_iovcnt = 1; break; } ivp = iv; if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv); } count = crp->crp_payload_start; ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); - while (i > 0) { + while (i >= blks) { /* * If there's insufficient data at the end of * an iovec, we have to do some copying. 
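The short span is gathered into the local blk buffer with cuio_copydata(), transformed in place, and written back with cuio_copyback().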
*/ if (uio->uio_iov[ind].iov_len < k + blks && uio->uio_iov[ind].iov_len != k) { cuio_copydata(uio, count, blks, blk); /* Actual encryption/decryption */ if (exf->reinit) { if (encrypting) { exf->encrypt(sw->sw_kschedule, blk, blk); } else { exf->decrypt(sw->sw_kschedule, blk, blk); } } else if (encrypting) { /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, blk, blk); /* * Keep encrypted block for XOR'ing * with next block */ bcopy(blk, iv, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; bcopy(blk, nivp, blks); exf->decrypt(sw->sw_kschedule, blk, blk); /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; ivp = nivp; } /* Copy back decrypted block */ cuio_copyback(uio, count, blks, blk); count += blks; /* Advance pointer */ ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i -= blks; /* Could be done... */ if (i == 0) break; } - while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) { + while (uio->uio_iov[ind].iov_len >= k + blks && i >= blks) { uint8_t *idat; - size_t nb, rem; - nb = blks; - rem = MIN((size_t)i, - uio->uio_iov[ind].iov_len - (size_t)k); idat = (uint8_t *)uio->uio_iov[ind].iov_base + k; if (exf->reinit) { - if (encrypting && exf->encrypt_multi == NULL) + if (encrypting) exf->encrypt(sw->sw_kschedule, idat, idat); - else if (encrypting) { - nb = rounddown(rem, blks); - exf->encrypt_multi(sw->sw_kschedule, - idat, idat, nb); - } else if (exf->decrypt_multi == NULL) + else exf->decrypt(sw->sw_kschedule, idat, idat); - else { - nb = rounddown(rem, blks); - exf->decrypt_multi(sw->sw_kschedule, - idat, idat, nb); - } } else if (encrypting) { /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, idat, idat); ivp = idat; } else { /* decrypt */ /* * Keep encrypted block to be used * in next block's processing. */ nivp = (ivp == iv) ? iv2 : iv; bcopy(idat, nivp, blks); exf->decrypt(sw->sw_kschedule, idat, idat); /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; ivp = nivp; } - count += nb; - k += nb; - i -= nb; + count += blks; + k += blks; + i -= blks; } /* * Advance to the next iov if the end of the current iov * is aligned with the end of a cipher block. * Note that the code is equivalent to calling: * ind = cuio_getptr(uio, count, &k); */ if (i > 0 && k == uio->uio_iov[ind].iov_len) { k = 0; ind++; if (ind >= uio->uio_iovcnt) { error = EINVAL; goto out; } } } + /* Handle trailing partial block for stream ciphers. 
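Such ciphers advertise a nonzero native_blocksize, so the payload is not required to be a multiple of the block size; the remaining 0 < i < blks bytes are bounced through the local blk buffer and handed, together with their length, to the encrypt_last/decrypt_last hooks.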
*/ + if (i > 0) { + KASSERT(exf->native_blocksize != 0, + ("%s: partial block of %d bytes for cipher %s", + __func__, i, exf->name)); + KASSERT(exf->reinit != NULL, + ("%s: partial block cipher %s without reinit hook", + __func__, exf->name)); + KASSERT(i < blks, ("%s: partial block too big", __func__)); + + cuio_copydata(uio, count, i, blk); + if (encrypting) { + exf->encrypt_last(sw->sw_kschedule, blk, blk, i); + } else { + exf->decrypt_last(sw->sw_kschedule, blk, blk, i); + } + cuio_copyback(uio, count, i, blk); + } + out: if (iovalloc) free(iov, M_CRYPTO_DATA); return (error); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw, const uint8_t *key, int klen) { switch (axf->type) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: hmac_init_ipad(axf, key, klen, sw->sw_ictx); hmac_init_opad(axf, key, klen, sw->sw_octx); break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: panic("%s: algorithm %d doesn't use keys", __func__, axf->type); } } /* * Compute or verify hash. */ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; u_char uaalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; if (crp->crp_auth_key != NULL) { csp = crypto_get_params(crp->crp_session); swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, (int (*)(void *, void *, unsigned int))axf->Update, &ctx); if (err) return err; err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, (int (*)(void *, void *, unsigned int))axf->Update, &ctx); if (err) return err; switch (axf->type) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Final(aalg, &ctx); break; case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: if (sw->sw_octx == NULL) return EINVAL; axf->Final(aalg, &ctx); bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); break; case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: case CRYPTO_NULL_HMAC: case CRYPTO_POLY1305: axf->Final(aalg, &ctx); break; } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } return (0); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; uint32_t *blkp; int blksz, i, ivlen, len; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; /* 
Initialize the IV */ ivlen = AES_GCM_IV_LEN; crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } /* length block */ bzero(blk, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(aalg, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; int blksz, i, ivlen, len, r; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; swe = &ses->swcr_encdec; exf = swe->sw_exf; + KASSERT(axf->blocksize == exf->native_blocksize, + ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ for (i = 0; i < crp->crp_aad_length; i += blksz) { len = MIN(crp->crp_aad_length - i, blksz); crypto_copydata(crp, crp->crp_aad_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } exf->reinit(swe->sw_kschedule, iv); /* Do encryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += len) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt(swe->sw_kschedule, blk, blk); axf->Update(&ctx, blk, len); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } else { axf->Update(&ctx, blk, len); } } /* length block */ bzero(blk, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(aalg, &ctx); /* Validate tag */ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen); if (r != 0) return (EBADMSG); /* tag matches, decrypt data */ for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); exf->decrypt(swe->sw_kschedule, blk, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char 
uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; int blksz, i, ivlen, len; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; crypto_read_iv(crp, iv); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length; ctx.aes_cbc_mac_ctx.cryptDataLength = 0; axf->Reinit(&ctx, iv, ivlen); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } /* Finalize MAC */ axf->Final(aalg, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_ccm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; int blksz, i, ivlen, len, r; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; swe = &ses->swcr_encdec; exf = swe->sw_exf; + KASSERT(axf->blocksize == exf->native_blocksize, + ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ for (i = 0; i < crp->crp_aad_length; i += blksz) { len = MIN(crp->crp_aad_length - i, blksz); crypto_copydata(crp, crp->crp_aad_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += len) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, len); exf->encrypt(swe->sw_kschedule, blk, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unencrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified.
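The tag comparison below uses timingsafe_bcmp() so a mismatch does not leak timing information.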
*/ exf->decrypt(swe->sw_kschedule, blk, blk); axf->Update(&ctx, blk, len); } } /* Finalize MAC */ axf->Final(aalg, &ctx); /* Validate tag */ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen); if (r != 0) return (EBADMSG); /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, iv); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); exf->decrypt(swe->sw_kschedule, blk, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } /* * Apply a cipher and a digest to perform EtA. */ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { u_int8_t *data, *out; struct comp_algo *cxf; int adj; u_int32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must handle the whole buffer of data at once; * if all the data is not in the mbuf, we must copy it * into a buffer first. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback * extends the mbuf as necessary.
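When the result is shorter than the original payload, the buffer is trimmed afterwards: m_adj() for mbuf chains, or shortening the trailing iovecs of a uio.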
*/ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_cipher(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); MPASS(txf->ivsize == csp->csp_ivlen); if (txf->ctxsize != 0) { swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swe->sw_kschedule == NULL) return (ENOMEM); } if (csp->csp_cipher_key != NULL) { error = txf->setkey(swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. */ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int swcr_setup_gcm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); /* First, setup the auth side. 
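The GMAC key schedule is derived from the cipher key, so the hash implementation is selected by the AES key length.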
*/ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_nist_gmac_aes_128; break; case 192: axf = &auth_hash_nist_gmac_aes_192; break; case 256: axf = &auth_hash_nist_gmac_aes_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static int swcr_setup_ccm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_ccm_cbc_mac_128; break; case 192: axf = &auth_hash_ccm_cbc_mac_192; break; case 256: axf = &auth_hash_ccm_cbc_mac_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_CCM_IV_LEN) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch 
(csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. */ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; struct swcr_encdec *swe; struct swcr_auth *swa; struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; swe = &ses->swcr_encdec; swa = &ses->swcr_auth; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad cipher algo"); #endif default: error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: error = swcr_setup_gcm(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: error = swcr_setup_ccm(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; struct swcr_auth *swa; struct auth_hash *axf; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA); axf = ses->swcr_auth.sw_axf; if (axf != NULL) { swa = &ses->swcr_auth; if (swa->sw_ictx != NULL) { explicit_bzero(swa->sw_ictx, axf->ctxsize); free(swa->sw_ictx, M_CRYPTO_DATA); } if (swa->sw_octx != NULL) { explicit_bzero(swa->sw_octx, axf->ctxsize); free(swa->sw_octx, M_CRYPTO_DATA); } } } /* * Process a software request. 
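Dispatch goes through the per-session swcr_process handler chosen at newsession time, serialized by the session lock.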
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); Index: head/sys/opencrypto/xform_aes_icm.c =================================================================== --- head/sys/opencrypto/xform_aes_icm.c (revision 361389) +++ head/sys/opencrypto/xform_aes_icm.c (revision 361390) @@ -1,173 +1,194 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). 
* * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <opencrypto/xform_enc.h> static int aes_icm_setkey(void *, const uint8_t *, int); static void aes_icm_crypt(void *, const uint8_t *, uint8_t *); +static void aes_icm_crypt_last(void *, const uint8_t *, uint8_t *, size_t); static void aes_icm_reinit(void *, const uint8_t *); static void aes_gcm_reinit(void *, const uint8_t *); static void aes_ccm_reinit(void *, const uint8_t *); /* Encryption instances */ struct enc_xform enc_xform_aes_icm = { .type = CRYPTO_AES_ICM, .name = "AES-ICM", .ctxsize = sizeof(struct aes_icm_ctx), - .blocksize = AES_BLOCK_LEN, + .blocksize = 1, + .native_blocksize = AES_BLOCK_LEN, .ivsize = AES_BLOCK_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, .setkey = aes_icm_setkey, .reinit = aes_icm_reinit, + .encrypt_last = aes_icm_crypt_last, + .decrypt_last = aes_icm_crypt_last, }; struct enc_xform enc_xform_aes_nist_gcm = { .type = CRYPTO_AES_NIST_GCM_16, .name = "AES-GCM", .ctxsize = sizeof(struct aes_icm_ctx), - .blocksize = AES_ICM_BLOCK_LEN, + .blocksize = 1, + .native_blocksize = AES_BLOCK_LEN, .ivsize = AES_GCM_IV_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, .setkey = aes_icm_setkey, .reinit = aes_gcm_reinit, + .encrypt_last = aes_icm_crypt_last, + .decrypt_last = aes_icm_crypt_last, }; struct enc_xform enc_xform_ccm = { .type = CRYPTO_AES_CCM_16, .name = "AES-CCM", .ctxsize = sizeof(struct aes_icm_ctx), - .blocksize = AES_ICM_BLOCK_LEN, .ivsize = AES_CCM_IV_LEN, + .blocksize = 1, + .native_blocksize = AES_BLOCK_LEN, + .ivsize = AES_CCM_IV_LEN, .minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY, .encrypt = aes_icm_crypt, .decrypt = aes_icm_crypt, .setkey = aes_icm_setkey, .reinit = aes_ccm_reinit, + .encrypt_last = aes_icm_crypt_last, + .decrypt_last = aes_icm_crypt_last, }; /* * Encryption wrapper routines. */ static void aes_icm_reinit(void *key, const uint8_t *iv) { struct aes_icm_ctx *ctx; ctx = key; bcopy(iv, ctx->ac_block, AESICM_BLOCKSIZE); } static void aes_gcm_reinit(void *key, const uint8_t *iv) { struct aes_icm_ctx *ctx; aes_icm_reinit(key, iv); ctx = key; /* GCM starts with 2 as counter 1 is used for final xor of tag.
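The low 32 bits of the counter block are therefore zeroed and the last byte set to 2 before bulk encryption begins.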
*/ bzero(&ctx->ac_block[AESICM_BLOCKSIZE - 4], 4); ctx->ac_block[AESICM_BLOCKSIZE - 1] = 2; } static void aes_ccm_reinit(void *key, const uint8_t *iv) { struct aes_icm_ctx *ctx; ctx = key; /* CCM has flags, then the IV, then the counter, which starts at 1 */ bzero(ctx->ac_block, sizeof(ctx->ac_block)); /* 3 bytes for length field; this gives a nonce of 12 bytes */ ctx->ac_block[0] = (15 - AES_CCM_IV_LEN) - 1; bcopy(iv, ctx->ac_block+1, AES_CCM_IV_LEN); ctx->ac_block[AESICM_BLOCKSIZE - 1] = 1; } static void aes_icm_crypt(void *key, const uint8_t *in, uint8_t *out) { struct aes_icm_ctx *ctx; - uint8_t keystream[AESICM_BLOCKSIZE]; int i; ctx = key; - rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream); - for (i = 0; i < AESICM_BLOCKSIZE; i++) - out[i] = in[i] ^ keystream[i]; - explicit_bzero(keystream, sizeof(keystream)); + aes_icm_crypt_last(key, in, out, AESICM_BLOCKSIZE); /* increment counter */ for (i = AESICM_BLOCKSIZE - 1; i >= 0; i--) if (++ctx->ac_block[i]) /* continue on overflow */ break; +} + +static void +aes_icm_crypt_last(void *key, const uint8_t *in, uint8_t *out, size_t len) +{ + struct aes_icm_ctx *ctx; + uint8_t keystream[AESICM_BLOCKSIZE]; + int i; + + ctx = key; + rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream); + for (i = 0; i < len; i++) + out[i] = in[i] ^ keystream[i]; + explicit_bzero(keystream, sizeof(keystream)); } static int aes_icm_setkey(void *sched, const uint8_t *key, int len) { struct aes_icm_ctx *ctx; if (len != 16 && len != 24 && len != 32) return (EINVAL); ctx = sched; ctx->ac_nr = rijndaelKeySetupEnc(ctx->ac_ek, key, len * 8); return (0); } Index: head/sys/opencrypto/xform_enc.h =================================================================== --- head/sys/opencrypto/xform_enc.h (revision 361389) +++ head/sys/opencrypto/xform_enc.h (revision 361390) @@ -1,94 +1,99 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #ifndef _CRYPTO_XFORM_ENC_H_ #define _CRYPTO_XFORM_ENC_H_ #include #include #include #include #include #ifdef _STANDALONE #include #endif #define AESICM_BLOCKSIZE AES_BLOCK_LEN #define AES_XTS_BLOCKSIZE 16 #define AES_XTS_IVSIZE 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Declarations */ struct enc_xform { int type; char *name; size_t ctxsize; u_int16_t blocksize; /* Required input block size -- 1 for stream ciphers. */ + uint16_t native_blocksize; /* Used for stream ciphers. 
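Zero for plain block ciphers; when nonzero, payloads need not be a multiple of this size and any final partial block is handled by encrypt_last/decrypt_last.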
*/ u_int16_t ivsize; u_int16_t minkey, maxkey; + + /* + * Encrypt/decrypt a single block. For stream ciphers this + * encrypts/decrypts a single "native" block. + */ void (*encrypt) (void *, const uint8_t *, uint8_t *); void (*decrypt) (void *, const uint8_t *, uint8_t *); int (*setkey) (void *, const uint8_t *, int len); void (*reinit) (void *, const u_int8_t *); + /* - * Encrypt/decrypt 1+ blocks of input -- total size is 'len' bytes. - * Len is guaranteed to be a multiple of the defined 'blocksize'. - * Optional interface -- most useful for stream ciphers with a small - * blocksize (1). + * For stream ciphers, encrypt/decrypt the final partial block + * of 'len' bytes. */ - void (*encrypt_multi) (void *, const uint8_t *, uint8_t *, size_t len); - void (*decrypt_multi) (void *, const uint8_t *, uint8_t *, size_t len); + void (*encrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); + void (*decrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); }; extern struct enc_xform enc_xform_null; extern struct enc_xform enc_xform_rijndael128; extern struct enc_xform enc_xform_aes_icm; extern struct enc_xform enc_xform_aes_nist_gcm; extern struct enc_xform enc_xform_aes_nist_gmac; extern struct enc_xform enc_xform_aes_xts; extern struct enc_xform enc_xform_camellia; extern struct enc_xform enc_xform_chacha20; extern struct enc_xform enc_xform_ccm; struct aes_icm_ctx { u_int32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)]; /* ac_block is initialized to IV */ u_int8_t ac_block[AESICM_BLOCKSIZE]; int ac_nr; }; struct aes_xts_ctx { rijndael_ctx key1; rijndael_ctx key2; u_int8_t tweak[AES_XTS_BLOCKSIZE]; }; #endif /* _CRYPTO_XFORM_ENC_H_ */
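To tie the new hooks together, here is a minimal sketch (not FreeBSD source; stream_encrypt_buf is a hypothetical helper, and key setup plus error handling are elided) of how a caller is expected to drive a stream-cipher enc_xform over a contiguous buffer, mirroring the swcr_encdec() loop above:

#include <stdint.h>
#include <stddef.h>
#include <opencrypto/xform_enc.h>	/* struct enc_xform, as defined above */

static void
stream_encrypt_buf(struct enc_xform *exf, void *kschedule, const uint8_t *iv,
    uint8_t *buf, size_t len)
{
	size_t blks = exf->native_blocksize;

	/* Stream ciphers provide reinit() and perform all IV handling there. */
	exf->reinit(kschedule, iv);

	/* Whole native blocks go through the single-block hook. */
	while (len >= blks) {
		exf->encrypt(kschedule, buf, buf);
		buf += blks;
		len -= blks;
	}

	/* Any trailing partial block goes through the new *_last hook. */
	if (len > 0)
		exf->encrypt_last(kschedule, buf, buf, len);
}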