diff --git a/share/man/man7/crypto.7 b/share/man/man7/crypto.7 index d75daa62adcb..c7af22af4deb 100644 --- a/share/man/man7/crypto.7 +++ b/share/man/man7/crypto.7 @@ -1,180 +1,180 @@ .\" Copyright (c) 2014-2021 The FreeBSD Foundation .\" All rights reserved. .\" .\" Portions of this documentation were written by John-Mark Gurney .\" under the sponsorship of the FreeBSD Foundation and .\" Rubicon Communications, LLC (Netgate). .\" .\" Portions of this documentation were written by Ararat River .\" Consulting, LLC under sponsorship of the FreeBSD Foundation. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd October 6, 2021 .Dt CRYPTO 7 .Os .Sh NAME .Nm crypto .Nd OpenCrypto algorithms .Sh DESCRIPTION The in-kernel OpenCrypto framework supports several different encryption and authentication algorithms. This document describes the parameters and requirements of these algorithms. Unless otherwise noted, all sizes listed below are in bytes. .Ss Authenticators Authenticators compute a value (also known as a digest, hash, or tag) over an input of bytes. In-kernel requests can either compute the value for a given input, or verify if a given tag matches the computed tag for a given input. 
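.Pp
For example, a kernel consumer could compute a SHA-2 256 digest over a
contiguous buffer roughly as follows.
This is a sketch against the
.Xr crypto 9
request interface; the completion callback, buffer sizing, and error
handling are simplified, and
.Fn mydone
is an illustrative name.
.Bd -literal -offset indent
struct crypto_session_params csp;
crypto_session_t cses;
struct cryptop *crp;
int error;

memset(&csp, 0, sizeof(csp));
csp.csp_mode = CSP_MODE_DIGEST;
csp.csp_auth_alg = CRYPTO_SHA2_256;
error = crypto_newsession(&cses, &csp,
    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);

crp = crypto_getreq(cses, M_WAITOK);
crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
crypto_use_buf(crp, buf, len + SHA2_256_HASH_LEN);
crp->crp_payload_start = 0;
crp->crp_payload_length = len;
crp->crp_digest_start = len;	/* digest lands after the payload */
crp->crp_callback = mydone;
error = crypto_dispatch(crp);
.Ed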
The following authentication algorithms are supported: .Bl -column "CRYPTO_AES_CCM_CBC_MAC" "XXX" "16, 24, 32" "Digest" .It Sy Name Ta Sy Nonce Ta Sy Key Sizes Ta Sy Digest Ta Sy Description .It Dv CRYPTO_AES_CCM_CBC_MAC Ta 12 Ta 16, 24, 32 Ta 16 Ta Authentication-only mode of AES-CCM .It Dv CRYPTO_AES_NIST_GMAC Ta 12 Ta 16, 24, 32 Ta 16 Ta Galois message authentication code .It Dv CRYPTO_BLAKE2B Ta Ta 0, 64 Ta 64 Ta Blake2b .It Dv CRYPTO_BLAKE2S Ta Ta 0, 32 Ta 32 Ta Blake2s .It Dv CRYPTO_NULL_HMAC Ta Ta Ta 12 Ta IPsec NULL HMAC .It Dv CRYPTO_POLY1305 Ta Ta 32 Ta 16 Ta Poly1305 authenticator .It Dv CRYPTO_RIPEMD160 Ta Ta Ta 20 Ta RIPE Message Digest-160 .It Dv CRYPTO_RIPEMD160_HMAC Ta Ta 64 Ta 20 Ta RIPE Message Digest-160 HMAC .It Dv CRYPTO_SHA1 Ta Ta Ta 20 Ta SHA-1 .It Dv CRYPTO_SHA1_HMAC Ta Ta 64 Ta 20 Ta SHA-1 HMAC .It Dv CRYPTO_SHA2_224 Ta Ta Ta 28 Ta SHA-2 224 .It Dv CRYPTO_SHA2_224_HMAC Ta Ta 64 Ta 28 Ta SHA-2 224 HMAC .It Dv CRYPTO_SHA2_256 Ta Ta Ta 32 Ta SHA-2 256 .It Dv CRYPTO_SHA2_256_HMAC Ta Ta 64 Ta 32 Ta SHA-2 256 HMAC .It Dv CRYPTO_SHA2_384 Ta Ta Ta 48 Ta SHA-2 384 .It Dv CRYPTO_SHA2_384_HMAC Ta Ta 128 Ta 48 Ta SHA-2 384 HMAC .It Dv CRYPTO_SHA2_512 Ta Ta Ta 64 Ta SHA-2 512 .It Dv CRYPTO_SHA2_512_HMAC Ta Ta 128 Ta 64 Ta SHA-2 512 HMAC .El .Ss Block Ciphers Block ciphers in OCF can only operate on messages whose length is an exact multiple of the cipher's block size. OCF supports the following block ciphers: .Bl -column "CRYPTO_CAMELLIA_CBC" "IV Size" "Block Size" "16, 24, 32" .It Sy Name Ta Sy IV Size Ta Sy Block Size Ta Sy Key Sizes Ta Sy Description .It Dv CRYPTO_AES_CBC Ta 16 Ta 16 Ta 16, 24, 32 Ta AES-CBC .It Dv CRYPTO_AES_XTS Ta 8 Ta 16 Ta 32, 64 Ta AES-XTS .It Dv CRYPTO_CAMELLIA_CBC Ta 16 Ta 16 Ta 16, 24, 32 Ta Camellia CBC .It Dv CRYPTO_NULL_CBC Ta 0 Ta 4 Ta 0-256 Ta IPsec NULL cipher .El .Pp .Dv CRYPTO_AES_XTS implements XEX Tweakable Block Cipher with Ciphertext Stealing as defined in NIST SP 800-38E. OCF consumers provide the first 8 bytes of the IV. The remaining 8 bytes are defined to be a block counter beginning at 0. .Pp NOTE: The ciphertext stealing part is not implemented in all backends which is why this cipher requires input that is a multiple of the block size. .Ss Stream Ciphers Stream ciphers can operate on messages with arbitrary lengths. OCF supports the following stream ciphers: .Bl -column "CRYPTO_CHACHA20" "IV Size" "16, 24, 32" .It Sy Name Ta Sy IV Size Ta Sy Key Sizes Ta Sy Description .It Dv CRYPTO_AES_ICM Ta 16 Ta 16, 24, 32 Ta AES Counter Mode .It Dv CRYPTO_CHACHA20 Ta 16 Ta 16, 32 Ta ChaCha20 .El .Pp The IV for each request must be provided in .Fa crp_iv via the .Dv CRYPTO_F_IV_SEPARATE flag. .Pp .Dv CRYPTO_AES_ICM uses the entire IV as a 128-bit big endian block counter. The IV sets the initial counter value for a message. If a consumer wishes to use an IV whose value is split into separate nonce and counter fields (e.g., IPsec), the consumer is responsible for splitting requests to handle counter rollover. .Pp .Dv CRYPTO_CHACHA20 accepts a 16 byte IV. The first 8 bytes are used as a nonce. The last 8 bytes are used as a 64-bit little-endian block counter. .Ss Authenticated Encryption with Associated Data Algorithms AEAD algorithms in OCF combine a stream cipher with an authentication algorithm to provide both secrecy and authentication. AEAD algorithms accept additional authentication data (AAD) in addition to the ciphertext or plaintext. 
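.Pp
As an illustration of the
.Dv CRYPTO_CHACHA20
stream cipher IV layout described above, a consumer could build the 16
byte IV from an 8 byte nonce and an initial block count as follows
(a sketch;
.Va nonce
and
.Va block_count
are illustrative names):
.Bd -literal -offset indent
uint8_t iv[16];

memcpy(iv, nonce, 8);		/* first 8 bytes: nonce */
le64enc(iv + 8, block_count);	/* 64-bit little-endian counter */
.Ed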
AAD is passed to the authentication algorithm as input in a method defined by the specific AEAD algorithm. .Pp AEAD algorithms in OCF accept a nonce that is combined with an algorithm-defined counter to construct the IV for the underlying stream cipher. This nonce must be provided in .Fa crp_iv via the .Dv CRYPTO_F_IV_SEPARATE flag. Some AEAD algorithms support multiple nonce sizes. The first size listed is the default nonce size. .Pp The following AEAD algorithms are supported: .Bl -column "CRYPTO_AES_NIST_GCM_16" "12, 7-13" "16, 24, 32" "Tag" .It Sy Name Ta Sy Nonce Ta Sy Key Sizes Ta Sy Tag Ta Sy Description .It Dv CRYPTO_AES_NIST_GCM_16 Ta 12 Ta 16, 24, 32 Ta 16 Ta AES Galois/Counter Mode .It Dv CRYPTO_AES_CCM_16 Ta 12, 7-13 Ta 16, 24, 32 Ta 16 Ta AES Counter with CBC-MAC -.It Dv CRYPTO_CHACHA20_POLY1305 Ta 12 Ta 32 Ta 16 Ta +.It Dv CRYPTO_CHACHA20_POLY1305 Ta 12, 8 Ta 32 Ta 16 Ta ChaCha20-Poly1305 .El .Sh SEE ALSO .Xr crypto 4 , .Xr crypto 9 .Sh HISTORY The .Nm manual page first appeared in .Fx 10.1 . diff --git a/sys/crypto/openssl/ossl_chacha20.c b/sys/crypto/openssl/ossl_chacha20.c index a6f56aca1f8f..aa125121e8b4 100644 --- a/sys/crypto/openssl/ossl_chacha20.c +++ b/sys/crypto/openssl/ossl_chacha20.c @@ -1,438 +1,440 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Netflix, Inc * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include #include #include #include #include #include #include #include int ossl_chacha20(struct cryptop *crp, const struct crypto_session_params *csp) { _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; unsigned int counter[CHACHA_CTR_SIZE / 4]; unsigned char block[CHACHA_BLK_SIZE]; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *in, *inseg, *cipher_key; unsigned char *out, *outseg; size_t resid, todo, inlen, outlen; uint32_t next_counter; u_int i; if (crp->crp_cipher_key != NULL) cipher_key = crp->crp_cipher_key; else cipher_key = csp->csp_cipher_key; for (i = 0; i < nitems(key); i++) key[i] = CHACHA_U8TOU32(cipher_key + i * 4); crypto_read_iv(crp, counter); for (i = 0; i < nitems(counter); i++) counter[i] = le32toh(counter[i]); resid = crp->crp_payload_length; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inseg = crypto_cursor_segment(&cc_in, &inlen); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outseg = crypto_cursor_segment(&cc_out, &outlen); while (resid >= CHACHA_BLK_SIZE) { if (inlen < CHACHA_BLK_SIZE) { crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); in = block; inlen = CHACHA_BLK_SIZE; } else in = inseg; if (outlen < CHACHA_BLK_SIZE) { out = block; outlen = CHACHA_BLK_SIZE; } else out = outseg; /* Figure out how many blocks we can encrypt/decrypt at once. */ todo = rounddown(MIN(resid, MIN(inlen, outlen)), CHACHA_BLK_SIZE); #ifdef __LP64__ /* ChaCha20_ctr32() assumes length is <= 4GB. */ todo = (uint32_t)todo; #endif /* Truncate if the 32-bit counter would roll over. */ next_counter = counter[0] + todo / CHACHA_BLK_SIZE; if (next_counter < counter[0]) { todo -= next_counter * CHACHA_BLK_SIZE; next_counter = 0; } ChaCha20_ctr32(out, in, todo, key, counter); counter[0] = next_counter; if (counter[0] == 0) counter[1]++; if (out == block) { crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); outseg = crypto_cursor_segment(&cc_out, &outlen); } else { crypto_cursor_advance(&cc_out, todo); outseg += todo; outlen -= todo; } if (in == block) { inseg = crypto_cursor_segment(&cc_in, &inlen); } else { crypto_cursor_advance(&cc_in, todo); inseg += todo; inlen -= todo; } resid -= todo; } if (resid > 0) { memset(block, 0, sizeof(block)); crypto_cursor_copydata(&cc_in, resid, block); ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); crypto_cursor_copyback(&cc_out, resid, block); } explicit_bzero(block, sizeof(block)); explicit_bzero(counter, sizeof(counter)); explicit_bzero(key, sizeof(key)); return (0); } int ossl_chacha20_poly1305_encrypt(struct cryptop *crp, const struct crypto_session_params *csp) { _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; unsigned int counter[CHACHA_CTR_SIZE / 4]; _Alignas(8) unsigned char block[CHACHA_BLK_SIZE]; unsigned char tag[POLY1305_HASH_LEN]; POLY1305 auth_ctx; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *in, *inseg, *cipher_key; unsigned char *out, *outseg; size_t resid, todo, inlen, outlen; uint32_t next_counter; u_int i; if (crp->crp_cipher_key != NULL) cipher_key = crp->crp_cipher_key; else cipher_key = csp->csp_cipher_key; for (i = 0; i < nitems(key); i++) key[i] = CHACHA_U8TOU32(cipher_key + i * 4); - crypto_read_iv(crp, counter + 1); + memset(counter, 0, sizeof(counter)); + crypto_read_iv(crp, counter + (CHACHA_CTR_SIZE - csp->csp_ivlen) / 4); for (i = 1; i < nitems(counter); i++) counter[i] = le32toh(counter[i]); 
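	/*
	 * After crypto_read_iv() above, the nonce occupies the top of
	 * counter[]: a 12-byte nonce fills counter[1..3] and leaves a
	 * 32-bit block counter in counter[0], while an 8-byte nonce fills
	 * counter[2..3] and leaves a 64-bit counter in counter[0..1].
	 */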
/* Block 0 is used to generate the poly1305 key. */ counter[0] = 0; memset(block, 0, sizeof(block)); ChaCha20_ctr32(block, block, sizeof(block), key, counter); Poly1305_Init(&auth_ctx, block); /* MAC the AAD. */ if (crp->crp_aad != NULL) Poly1305_Update(&auth_ctx, crp->crp_aad, crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, ossl_poly1305_update, &auth_ctx); if (crp->crp_aad_length % 16 != 0) { /* padding1 */ memset(block, 0, 16); Poly1305_Update(&auth_ctx, block, 16 - crp->crp_aad_length % 16); } /* Encryption starts with block 1. */ counter[0] = 1; /* Do encryption with MAC */ resid = crp->crp_payload_length; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inseg = crypto_cursor_segment(&cc_in, &inlen); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outseg = crypto_cursor_segment(&cc_out, &outlen); while (resid >= CHACHA_BLK_SIZE) { if (inlen < CHACHA_BLK_SIZE) { crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); in = block; inlen = CHACHA_BLK_SIZE; } else in = inseg; if (outlen < CHACHA_BLK_SIZE) { out = block; outlen = CHACHA_BLK_SIZE; } else out = outseg; /* Figure out how many blocks we can encrypt/decrypt at once. */ todo = rounddown(MIN(resid, MIN(inlen, outlen)), CHACHA_BLK_SIZE); #ifdef __LP64__ /* ChaCha20_ctr32() assumes length is <= 4GB. */ todo = (uint32_t)todo; #endif /* Truncate if the 32-bit counter would roll over. */ next_counter = counter[0] + todo / CHACHA_BLK_SIZE; - if (next_counter < counter[0]) { + if (csp->csp_ivlen == 8 && next_counter < counter[0]) { todo -= next_counter * CHACHA_BLK_SIZE; next_counter = 0; } ChaCha20_ctr32(out, in, todo, key, counter); Poly1305_Update(&auth_ctx, out, todo); counter[0] = next_counter; - if (counter[0] == 0) + if (csp->csp_ivlen == 8 && counter[0] == 0) counter[1]++; if (out == block) { crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); outseg = crypto_cursor_segment(&cc_out, &outlen); } else { crypto_cursor_advance(&cc_out, todo); outseg += todo; outlen -= todo; } if (in == block) { inseg = crypto_cursor_segment(&cc_in, &inlen); } else { crypto_cursor_advance(&cc_in, todo); inseg += todo; inlen -= todo; } resid -= todo; } if (resid > 0) { memset(block, 0, sizeof(block)); crypto_cursor_copydata(&cc_in, resid, block); ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); crypto_cursor_copyback(&cc_out, resid, block); /* padding2 */ todo = roundup2(resid, 16); memset(block + resid, 0, todo - resid); Poly1305_Update(&auth_ctx, block, todo); } /* lengths */ le64enc(block, crp->crp_aad_length); le64enc(block + 8, crp->crp_payload_length); Poly1305_Update(&auth_ctx, block, sizeof(uint64_t) * 2); Poly1305_Final(&auth_ctx, tag); crypto_copyback(crp, crp->crp_digest_start, csp->csp_auth_mlen == 0 ? 
POLY1305_HASH_LEN : csp->csp_auth_mlen, tag); explicit_bzero(&auth_ctx, sizeof(auth_ctx)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(block, sizeof(block)); explicit_bzero(counter, sizeof(counter)); explicit_bzero(key, sizeof(key)); return (0); } int ossl_chacha20_poly1305_decrypt(struct cryptop *crp, const struct crypto_session_params *csp) { _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; unsigned int counter[CHACHA_CTR_SIZE / 4]; _Alignas(8) unsigned char block[CHACHA_BLK_SIZE]; unsigned char tag[POLY1305_HASH_LEN], tag2[POLY1305_HASH_LEN]; struct poly1305_context auth_ctx; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *in, *inseg, *cipher_key; unsigned char *out, *outseg; size_t resid, todo, inlen, outlen; uint32_t next_counter; int error; u_int i, mlen; if (crp->crp_cipher_key != NULL) cipher_key = crp->crp_cipher_key; else cipher_key = csp->csp_cipher_key; for (i = 0; i < nitems(key); i++) key[i] = CHACHA_U8TOU32(cipher_key + i * 4); - crypto_read_iv(crp, counter + 1); + memset(counter, 0, sizeof(counter)); + crypto_read_iv(crp, counter + (CHACHA_CTR_SIZE - csp->csp_ivlen) / 4); for (i = 1; i < nitems(counter); i++) counter[i] = le32toh(counter[i]); /* Block 0 is used to generate the poly1305 key. */ counter[0] = 0; memset(block, 0, sizeof(block)); ChaCha20_ctr32(block, block, sizeof(block), key, counter); Poly1305_Init(&auth_ctx, block); /* MAC the AAD. */ if (crp->crp_aad != NULL) Poly1305_Update(&auth_ctx, crp->crp_aad, crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, ossl_poly1305_update, &auth_ctx); if (crp->crp_aad_length % 16 != 0) { /* padding1 */ memset(block, 0, 16); Poly1305_Update(&auth_ctx, block, 16 - crp->crp_aad_length % 16); } /* Mac the ciphertext. */ crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, ossl_poly1305_update, &auth_ctx); if (crp->crp_payload_length % 16 != 0) { /* padding2 */ memset(block, 0, 16); Poly1305_Update(&auth_ctx, block, 16 - crp->crp_payload_length % 16); } /* lengths */ le64enc(block, crp->crp_aad_length); le64enc(block + 8, crp->crp_payload_length); Poly1305_Update(&auth_ctx, block, sizeof(uint64_t) * 2); Poly1305_Final(&auth_ctx, tag); mlen = csp->csp_auth_mlen == 0 ? POLY1305_HASH_LEN : csp->csp_auth_mlen; crypto_copydata(crp, crp->crp_digest_start, mlen, tag2); if (timingsafe_bcmp(tag, tag2, mlen) != 0) { error = EBADMSG; goto out; } /* Decryption starts with block 1. */ counter[0] = 1; resid = crp->crp_payload_length; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inseg = crypto_cursor_segment(&cc_in, &inlen); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outseg = crypto_cursor_segment(&cc_out, &outlen); while (resid >= CHACHA_BLK_SIZE) { if (inlen < CHACHA_BLK_SIZE) { crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); in = block; inlen = CHACHA_BLK_SIZE; } else in = inseg; if (outlen < CHACHA_BLK_SIZE) { out = block; outlen = CHACHA_BLK_SIZE; } else out = outseg; /* Figure out how many blocks we can encrypt/decrypt at once. */ todo = rounddown(MIN(resid, MIN(inlen, outlen)), CHACHA_BLK_SIZE); #ifdef __LP64__ /* ChaCha20_ctr32() assumes length is <= 4GB. */ todo = (uint32_t)todo; #endif /* Truncate if the 32-bit counter would roll over. 
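 * With an 8-byte nonce the block counter is effectively 64 bits wide:
 * when the low word wraps, this pass is clamped at the wrap point and
 * the carry is propagated into counter[1] below.  With a 12-byte nonce
 * the counter is a single 32-bit word and no carry is performed.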
*/ next_counter = counter[0] + todo / CHACHA_BLK_SIZE; - if (next_counter < counter[0]) { + if (csp->csp_ivlen == 8 && next_counter < counter[0]) { todo -= next_counter * CHACHA_BLK_SIZE; next_counter = 0; } ChaCha20_ctr32(out, in, todo, key, counter); counter[0] = next_counter; - if (counter[0] == 0) + if (csp->csp_ivlen == 8 && counter[0] == 0) counter[1]++; if (out == block) { crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); outseg = crypto_cursor_segment(&cc_out, &outlen); } else { crypto_cursor_advance(&cc_out, todo); outseg += todo; outlen -= todo; } if (in == block) { inseg = crypto_cursor_segment(&cc_in, &inlen); } else { crypto_cursor_advance(&cc_in, todo); inseg += todo; inlen -= todo; } resid -= todo; } if (resid > 0) { memset(block, 0, sizeof(block)); crypto_cursor_copydata(&cc_in, resid, block); ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); crypto_cursor_copyback(&cc_out, resid, block); } error = 0; out: explicit_bzero(&auth_ctx, sizeof(auth_ctx)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(block, sizeof(block)); explicit_bzero(counter, sizeof(counter)); explicit_bzero(key, sizeof(key)); return (error); } diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c index a3a42827d51b..f0fd3fe662a9 100644 --- a/sys/opencrypto/crypto.c +++ b/sys/opencrypto/crypto.c @@ -1,2328 +1,2333 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * Copyright (c) 2021 The FreeBSD Foundation * * Portions of this software were developed by Ararat River * Consulting, LLC under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. 
Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. */ struct cryptocap { device_t cc_dev; uint32_t cc_hid; uint32_t cc_sessions; /* (d) # of sessions */ uint32_t cc_koperations; /* (d) # of asym operations */ uint8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ size_t cc_session_size; volatile int cc_refs; }; static struct cryptocap **crypto_drivers = NULL; static int crypto_drivers_size = 0; struct crypto_session { struct cryptocap *cap; struct crypto_session_params csp; uint64_t id; /* Driver softc follows. */ }; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD) operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations.
*/ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* * Taskqueue used to dispatch the crypto requests * that have the CRYPTO_F_ASYNC flag */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symmetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ uint32_t reorder_ops; /* total ordered sym jobs received */ uint32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) #define CRYPTO_RETW_EMPTY(w) \ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #endif static uma_zone_t cryptop_zone; int crypto_userasymcrypto = 1; SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable user-mode access to asymmetric crypto support"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); #endif int crypto_devallowsoft = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, cryptostats, nitems(cryptostats), "Crypto system statistics"); #define CRYPTOSTAT_INC(stat) do { \ counter_u64_add( \ cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 1); \ } while (0) static void cryptostats_init(void *arg __unused) { COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats),
M_WAITOK); } SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); static void cryptostats_fini(void *arg __unused) { COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); } SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, NULL); /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... */ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &crypto_tq); taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = mallocarray(crypto_workers_num, sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. 
*/ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: ensure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); explicit_bzero(hmac_key, sizeof(hmac_key)); } void hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources.
*/ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session + 1); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); case CRYPTO_CHACHA20_POLY1305: return (&enc_xform_chacha20_poly1305); default: return (NULL); } } static struct cryptocap * crypto_checkdriver(uint32_t hid) { return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. 
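 * For example, a flags value of CRYPTOCAP_F_HARDWARE restricts the
 * search to hardware-backed drivers, while CRYPTOCAP_F_HARDWARE |
 * CRYPTOCAP_F_SOFTWARE accepts either kind.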
*/ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static enum alg_type { ALG_NONE = 0, ALG_CIPHER, ALG_DIGEST, ALG_KEYED_DIGEST, ALG_COMPRESSION, ALG_AEAD } alg_types[] = { [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CBC] = ALG_CIPHER, [CRYPTO_SHA1] = ALG_DIGEST, [CRYPTO_NULL_HMAC] = ALG_DIGEST, [CRYPTO_NULL_CBC] = ALG_CIPHER, [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, [CRYPTO_AES_XTS] = ALG_CIPHER, [CRYPTO_AES_ICM] = ALG_CIPHER, [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, [CRYPTO_CHACHA20] = ALG_CIPHER, [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160] = ALG_DIGEST, [CRYPTO_SHA2_224] = ALG_DIGEST, [CRYPTO_SHA2_256] = ALG_DIGEST, [CRYPTO_SHA2_384] = ALG_DIGEST, [CRYPTO_SHA2_512] = ALG_DIGEST, [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_16] = ALG_AEAD, [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD, }; static enum alg_type alg_type(int alg) { if (alg < nitems(alg_types)) return (alg_types[alg]); return (ALG_NONE); } static bool alg_is_compression(int alg) { return (alg_type(alg) == ALG_COMPRESSION); } static bool alg_is_cipher(int alg) { return (alg_type(alg) == ALG_CIPHER); } static bool alg_is_digest(int alg) { return (alg_type(alg) == ALG_DIGEST || alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_keyed_digest(int alg) { return (alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_aead(int alg) { return (alg_type(alg) == ALG_AEAD); } static bool ccm_tag_length_valid(int len) { /* RFC 3610 */ switch (len) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: return (true); default: return (false); } } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) /* Various sanity checks on crypto session parameters. */ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. 
*/ if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); /* IV is optional for digests (e.g. GMAC). */ switch (csp->csp_auth_alg) { case CRYPTO_AES_CCM_CBC_MAC: if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) return (false); break; case CRYPTO_AES_NIST_GMAC: if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; default: if (csp->csp_ivlen != 0) return (false); break; } if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC && !ccm_tag_length_valid(csp->csp_auth_mlen)) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen != 0 && !ccm_tag_length_valid(csp->csp_auth_mlen)) return (false); if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) return (false); break; case CRYPTO_AES_NIST_GCM_16: - case CRYPTO_CHACHA20_POLY1305: if (csp->csp_auth_mlen > 16) return (false); break; + case CRYPTO_CHACHA20_POLY1305: + if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12) + return (false); + if (csp->csp_auth_mlen > POLY1305_HASH_LEN) + return (false); + break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. 
*/ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. */ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; zfree(cses, M_CRYPTO_DATA); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { static uint64_t sessid = 0; crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); /* Allocate a single block for the generic session and driver softc. */ res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->cap = cap; res->csp = *csp; res->id = atomic_fetchadd_64(&sessid, 1); /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. 
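 * The table grows by doubling; the wraparound check below catches the
 * doubled size overflowing int, and the driver lock is dropped around
 * the M_WAITOK allocation because it may sleep.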
*/ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. The latter gives * us a simple wildcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. */ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported by a driver. */ int crypto_kregister(uint32_t driverid, int kalg, uint32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); gone_in_dev(cap->cc_dev, 14, "asymmetric crypto"); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(uint32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver.
The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. */ int crypto_unblock(uint32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } size_t crypto_buffer_len(struct crypto_buffer *cb) { switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: return (cb->cb_buf_len); case CRYPTO_BUF_MBUF: if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); case CRYPTO_BUF_SINGLE_MBUF: return (cb->cb_mbuf->m_len); case CRYPTO_BUF_VMPAGE: return (cb->cb_vm_page_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: return (0); } } #ifdef INVARIANTS /* Various sanity checks on crypto requests. */ static void cb_sanity(struct crypto_buffer *cb, const char *name) { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); break; case CRYPTO_BUF_VMPAGE: KASSERT(CRYPTO_HAS_VMPAGE, ("incoming crp uses dmap on supported arch")); KASSERT(cb->cb_vm_page_len >= 0, ("incoming crp with -ve %s buffer length", name)); KASSERT(cb->cb_vm_page_offset >= 0, ("incoming crp with -ve %s buffer offset", name)); KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, ("incoming crp with %s buffer offset greater than page size" , name)); break; default: break; } } static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; struct crypto_buffer *out; size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; cb_sanity(&crp->crp_buf, "input"); ilen = crypto_buffer_len(&crp->crp_buf); olen = ilen; out = NULL; if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { cb_sanity(&crp->crp_obuf, "output"); out = &crp->crp_obuf; olen = crypto_buffer_len(out); } } else KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, ("incoming crp with separate output buffer " "but no session support")); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("AEAD without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 
("invalid ETA op %x", crp->crp_op)); break; } if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_aad == NULL) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, ("session doesn't support separate AAD buffer")); KASSERT(crp->crp_aad_start == 0, ("separate AAD buffer with non-zero AAD start")); KASSERT(crp->crp_aad_length != 0, ("separate AAD buffer with zero length")); } } else { KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, ("IV outside buffer length")); } /* XXX: payload_start of 0 should always be < ilen? */ KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= ilen, ("payload outside input buffer")); if (out == NULL) { KASSERT(crp->crp_payload_output_start == 0, ("payload output start non-zero without output buffer")); } else { KASSERT(crp->crp_payload_output_start < olen, ("invalid payload output start")); KASSERT(crp->crp_payload_output_start + crp->crp_payload_length <= olen, ("payload outside output buffer")); } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) len = ilen; else len = olen; KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif /* * Add a crypto request to a queue, to be processed by the kernel thread. 
*/ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif CRYPTOSTAT_INC(cs_ops); crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crp->crp_session->cap; if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymmetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; CRYPTOSTAT_INC(cs_kops); krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver.
*/ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. */ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys. 
This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_destroyreq(struct cryptop *crp) { #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; crypto_destroyreq(crp); uma_zfree(cryptop_zone, crp); } static void _crypto_initreq(struct cryptop *crp, crypto_session_t cses) { crp->crp_session = cses; } void crypto_initreq(struct cryptop *crp, crypto_session_t cses) { memset(crp, 0, sizeof(*crp)); _crypto_initreq(crp, cses); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); if (crp != NULL) _crypto_initreq(crp, cses); return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) CRYPTOSTAT_INC(cs_errs); /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. 
*/ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) CRYPTOSTAT_INC(cs_kerrs); cap = krp->krp_cap; if (cap != NULL) { CRYPTO_DRIVER_LOCK(); KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); } ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to ensure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * A driver cannot disappear while there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless of whether it's for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning in case more are queued */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptops and put * the request back in the queue. It would * be best to put the request back where we got * it, but that's hard, so for now we put it * at the front. This should be ok; putting * it at the end does not work.
*/ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); CRYPTOSTAT_INC(cs_blocks); } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkops and put * the request back in the queue. It would * be best to put the request back where we got * it, but that's hard, so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); CRYPTOSTAT_INC(cs_kblocks); } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wake up we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; CRYPTOSTAT_INC(cs_intrs); } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto return thread; runs callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) crpt->crp_callback(crpt); if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process.
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; CRYPTOSTAT_INC(cs_rets); } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. */ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c index fb43a08970c7..567a0f4748d5 100644 --- a/sys/opencrypto/cryptosoft.c +++ b/sys/opencrypto/cryptosoft.c @@ -1,1763 +1,1760 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. 
Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014-2021 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Portions of this software were developed by Ararat River * Consulting, LLC under sponsorship of the FreeBSD Foundation. * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; struct auth_hash *sw_axf; uint16_t sw_mlen; }; struct swcr_encdec { void *sw_kschedule; struct enc_xform *sw_exf; }; struct swcr_compdec { struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. */ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; size_t inlen, outlen; int i, blks, resid; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *inblk; unsigned char *outblk; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; csp = crypto_get_params(crp->crp_session); if (exf->native_blocksize == 0) { /* Check for non-padded data */ if ((crp->crp_payload_length % exf->blocksize) != 0) return (EINVAL); blks = exf->blocksize; } else blks = exf->native_blocksize; if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); if (crp->crp_cipher_key != NULL) { error = exf->setkey(sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } crypto_read_iv(crp, iv); if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. 
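 * (AES-ICM, for example, loads its counter block from the IV here, * which is why it requires CRYPTO_F_IV_SEPARATE above; CBC-style * ciphers without a reinit hook are instead chained by hand in the * loop below.)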
*/ exf->reinit(sw->sw_kschedule, iv, csp->csp_ivlen); } ivp = iv; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inblk = crypto_cursor_segment(&cc_in, &inlen); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outblk = crypto_cursor_segment(&cc_out, &outlen); resid = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); /* * Loop through encrypting blocks. 'inlen' is the remaining * length of the current segment in the input buffer. * 'outlen' is the remaining length of current segment in the * output buffer. */ while (resid >= blks) { /* * If the current block is not contained within the * current input/output segment, use 'blk' as a local * buffer. */ if (inlen < blks) { crypto_cursor_copydata(&cc_in, blks, blk); inblk = blk; } if (outlen < blks) outblk = blk; /* * Ciphers without a 'reinit' hook are assumed to be * used in CBC mode where the chaining is done here. */ if (exf->reinit != NULL) { if (encrypting) exf->encrypt(sw->sw_kschedule, inblk, outblk); else exf->decrypt(sw->sw_kschedule, inblk, outblk); } else if (encrypting) { /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] = inblk[i] ^ ivp[i]; exf->encrypt(sw->sw_kschedule, outblk, outblk); /* * Keep encrypted block for XOR'ing * with next block */ memcpy(iv, outblk, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; memcpy(nivp, inblk, blks); exf->decrypt(sw->sw_kschedule, inblk, outblk); /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] ^= ivp[i]; ivp = nivp; } if (inlen < blks) { inblk = crypto_cursor_segment(&cc_in, &inlen); } else { crypto_cursor_advance(&cc_in, blks); inlen -= blks; inblk += blks; } if (outlen < blks) { crypto_cursor_copyback(&cc_out, blks, blk); outblk = crypto_cursor_segment(&cc_out, &outlen); } else { crypto_cursor_advance(&cc_out, blks); outlen -= blks; outblk += blks; } resid -= blks; } /* Handle trailing partial block for stream ciphers. 
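 * Only xforms with a native block size and a reinit hook (i.e., * stream-like ciphers such as AES-ICM or ChaCha20) can leave a * partial trailing block here; the KASSERTs below enforce both * conditions.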
*/ if (resid > 0) { KASSERT(exf->native_blocksize != 0, ("%s: partial block of %d bytes for cipher %s", __func__, resid, exf->name)); KASSERT(exf->reinit != NULL, ("%s: partial block cipher %s without reinit hook", __func__, exf->name)); KASSERT(resid < blks, ("%s: partial block too big", __func__)); inblk = crypto_cursor_segment(&cc_in, &inlen); outblk = crypto_cursor_segment(&cc_out, &outlen); if (inlen < resid) { crypto_cursor_copydata(&cc_in, resid, blk); inblk = blk; } if (outlen < resid) outblk = blk; if (encrypting) exf->encrypt_last(sw->sw_kschedule, inblk, outblk, resid); else exf->decrypt_last(sw->sw_kschedule, inblk, outblk, resid); if (outlen < resid) crypto_cursor_copyback(&cc_out, resid, blk); } explicit_bzero(blk, sizeof(blk)); explicit_bzero(iv, sizeof(iv)); explicit_bzero(iv2, sizeof(iv2)); return (0); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw, const uint8_t *key, int klen) { switch (axf->type) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: hmac_init_ipad(axf, key, klen, sw->sw_ictx); hmac_init_opad(axf, key, klen, sw->sw_octx); break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: panic("%s: algorithm %d doesn't use keys", __func__, axf->type); } } /* * Compute or verify hash. */ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; csp = crypto_get_params(crp->crp_session); if (crp->crp_auth_key != NULL) { swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); if (crp->crp_aad != NULL) err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (err) goto out; if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) err = crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, axf->Update, &ctx); else err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (err) goto out; if (csp->csp_flags & CSP_F_ESN) axf->Update(&ctx, crp->crp_esn, 4); axf->Final(aalg, &ctx); if (sw->sw_octx != NULL) { bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char uaalg[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) err = EBADMSG; explicit_bzero(uaalg, sizeof(uaalg)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } explicit_bzero(aalg, sizeof(aalg)); out: explicit_bzero(&ctx, sizeof(ctx)); return (err); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc; const u_char *inblk; union authctx ctx; struct swcr_auth *swa; struct
auth_hash *axf; uint32_t *blkp; size_t len; int blksz, error, ivlen, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); crypto_cursor_init(&cc, &crp->crp_buf); crypto_cursor_advance(&cc, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) { inblk = crypto_cursor_segment(&cc, &len); if (len >= blksz) { len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc, len); } else { len = blksz; crypto_cursor_copydata(&cc, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc, resid, blk); axf->Update(&ctx, blk, blksz); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); error = 0; if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag2)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; size_t len; int blksz, error, ivlen, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); ivlen = AES_GCM_IV_LEN; /* Supply MAC with IV */ axf->Reinit(&ctx, crp->crp_iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) { len = rounddown(crp->crp_aad_length, blksz); if (len != 0) axf->Update(&ctx, crp->crp_aad, len); if (crp->crp_aad_length != len) { memset(blk, 0, blksz); memcpy(blk, (char *)crp->crp_aad + len, crp->crp_aad_length - len); axf->Update(&ctx, blk, blksz); } } else { crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_aad_start); for (resid = crp->crp_aad_length; resid >= blksz; resid -= len) { inblk = crypto_cursor_segment(&cc_in, &len); if (len >= blksz) { len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc_in, len); } else { len = blksz; crypto_cursor_copydata(&cc_in, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc_in, resid, blk); axf->Update(&ctx, blk, blksz); } } if (crp->crp_cipher_key != NULL) exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, crypto_get_params(crp->crp_session)->csp_cipher_klen); exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); /* Do encryption with MAC */ 
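/* * GCM authenticates the ciphertext, so on encryption each block is * encrypted first and its ciphertext fed to the GHASH update; on * decryption this pass only MACs the input, and the payload is * decrypted in a second pass further below once the tag has been * verified. */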
crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->encrypt(swe->sw_kschedule, inblk, outblk); axf->Update(&ctx, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { axf->Update(&ctx, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } axf->Update(&ctx, blk, resid); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); return (error); } static void build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length, u_int data_length, u_int tag_length, uint8_t *b0) { uint8_t *bp; uint8_t flags, L; KASSERT(nonce_length >= 7 && nonce_length <= 13, ("nonce_length must be between 7 and 13 bytes")); /* * Need to determine the L field value. This is the number of * bytes needed to specify the length of the message; the length * is whatever is left in the 16 bytes after specifying flags and * the nonce. */ L = 15 - nonce_length; flags = ((aad_length > 0) << 6) + (((tag_length - 2) / 2) << 3) + L - 1; /* * Now we need to set up the first block, which has flags, nonce, * and the message length. 
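 * * As an illustrative worked example: with a 12-byte nonce, a 16-byte * tag, and non-empty AAD, L = 15 - 12 = 3 and * flags = (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x7a, * leaving the last L = 3 bytes of b0 to carry the message length.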
*/ b0[0] = flags; memcpy(b0 + 1, nonce, nonce_length); bp = b0 + 1 + nonce_length; /* Need to copy L' [aka L-1] bytes of data_length */ for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) { *dst = data_length; data_length >>= 8; } } /* NB: OCF only supports AAD lengths < 2^32. */ static int build_ccm_aad_length(u_int aad_length, uint8_t *blk) { if (aad_length < ((1 << 16) - (1 << 8))) { be16enc(blk, aad_length); return (sizeof(uint16_t)); } else { blk[0] = 0xff; blk[1] = 0xfe; be32enc(blk + 2, aad_length); return (2 + sizeof(uint32_t)); } } static int swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) { u_char iv[AES_BLOCK_LEN]; u_char blk[CCM_CBC_BLOCK_LEN]; u_char tag[AES_CBC_MAC_HASH_LEN]; union authctx ctx; const struct crypto_session_params *csp; struct swcr_auth *swa; struct auth_hash *axf; int error, ivlen, len; csp = crypto_get_params(crp->crp_session); swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); /* Initialize the IV */ ivlen = csp->csp_ivlen; crypto_read_iv(crp, iv); /* Supply MAC with IV */ axf->Reinit(&ctx, crp->crp_iv, ivlen); /* Supply MAC with b0. */ build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0, swa->sw_mlen, blk); axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN); len = build_ccm_aad_length(crp->crp_payload_length, blk); axf->Update(&ctx, blk, len); crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); /* Finalize MAC */ axf->Final(tag, &ctx); error = 0; if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(tag, sizeof(tag)); explicit_bzero(blk, sizeof(blk)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_ccm(struct swcr_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[AES_CBC_MAC_HASH_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; size_t len; int blksz, error, ivlen, r, resid; csp = crypto_get_params(crp->crp_session); swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = AES_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if (crp->crp_payload_length > ccm_max_payload_length(csp)) return (EMSGSIZE); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); ivlen = csp->csp_ivlen; /* Supply MAC with IV */ axf->Reinit(&ctx, crp->crp_iv, ivlen); /* Supply MAC with b0. 
*/ _Static_assert(sizeof(blkbuf) >= CCM_CBC_BLOCK_LEN, "blkbuf too small for b0"); build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length, crp->crp_payload_length, swa->sw_mlen, blk); axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN); /* Supply MAC with AAD */ if (crp->crp_aad_length != 0) { len = build_ccm_aad_length(crp->crp_aad_length, blk); axf->Update(&ctx, blk, len); if (crp->crp_aad != NULL) axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); /* Pad the AAD (including length field) to a full block. */ len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN; if (len != 0) { len = CCM_CBC_BLOCK_LEN - len; memset(blk, 0, CCM_CBC_BLOCK_LEN); axf->Update(&ctx, blk, len); } } if (crp->crp_cipher_key != NULL) exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, crypto_get_params(crp->crp_session)->csp_cipher_klen); exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); /* Do encryption/decryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; axf->Update(&ctx, inblk, blksz); exf->encrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unencrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified. 
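 * In the loop below the first pass decrypts each block into the local * buffer purely to feed the CBC-MAC and discards the plaintext; the * real decryption into the output happens only after the tag check * succeeds.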
*/ exf->decrypt(swe->sw_kschedule, inblk, blk); axf->Update(&ctx, blk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, resid); exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } else { exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); axf->Update(&ctx, blk, resid); } } /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, crp->crp_iv, ivlen); crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); return (error); } static int swcr_chacha20_poly1305(struct swcr_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[POLY1305_HASH_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; uint64_t *blkp; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; size_t len; int blksz, error, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; swe = &ses->swcr_encdec; exf = swe->sw_exf; blksz = exf->native_blocksize; KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); csp = crypto_get_params(crp->crp_session); /* Generate Poly1305 key. 
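 * The one-time Poly1305 key is taken from ChaCha20 keystream block 0 * for this nonce (see chacha20_poly1305_Reinit() in * xform_chacha20_poly1305.c); payload encryption starts at block * counter 1, so the MAC key and the data keystream never overlap.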
*/ if (crp->crp_cipher_key != NULL) axf->Setkey(&ctx, crp->crp_cipher_key, csp->csp_cipher_klen); else axf->Setkey(&ctx, csp->csp_cipher_key, csp->csp_cipher_klen); axf->Reinit(&ctx, crp->crp_iv, csp->csp_ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (crp->crp_aad_length % 16 != 0) { /* padding1 */ memset(blk, 0, 16); axf->Update(&ctx, blk, 16 - crp->crp_aad_length % 16); } if (crp->crp_cipher_key != NULL) exf->setkey(swe->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); exf->reinit(swe->sw_kschedule, crp->crp_iv, csp->csp_ivlen); /* Do encryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->encrypt(swe->sw_kschedule, inblk, outblk); axf->Update(&ctx, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { axf->Update(&ctx, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } axf->Update(&ctx, blk, resid); if (resid % 16 != 0) { /* padding2 */ memset(blk, 0, 16); axf->Update(&ctx, blk, 16 - resid % 16); } } /* lengths */ blkp = (uint64_t *)blk; blkp[0] = htole64(crp->crp_aad_length); blkp[1] = htole64(crp->crp_payload_length); axf->Update(&ctx, blk, sizeof(uint64_t) * 2); /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[POLY1305_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { inblk = crypto_cursor_segment(&cc_in, &len); if (len < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else crypto_cursor_advance(&cc_in, blksz); outblk = crypto_cursor_segment(&cc_out, &len); if (len < blksz) outblk = blk; exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(&ctx, sizeof(ctx)); return (error); } /* * Apply a cipher and a digest to perform EtA. 
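 * (Encrypt-then-authenticate: for encryption the cipher runs first and * the digest is computed over the ciphertext; for decryption the * digest is verified before the payload is decrypted.)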
*/ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm. */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { uint8_t *data, *out; struct comp_algo *cxf; int adj; uint32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must process the whole payload in one pass, and the data may * not be contiguous in the request buffer (e.g., an mbuf chain), so * copy it into a local buffer first. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback extends * the mbuf as necessary. */ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: case CRYPTO_BUF_SINGLE_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_buf.cb_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_buf.cb_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; case CRYPTO_BUF_VMPAGE: adj = crp->crp_payload_length - result; crp->crp_buf.cb_vm_page_len -= adj; break; default: break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_cipher(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); if (txf->ctxsize != 0) { swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swe->sw_kschedule == NULL) return (ENOMEM); } if (csp->csp_cipher_key != NULL) { error = txf->setkey(swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS);
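/* * HMAC keeps two precomputed contexts: sw_ictx holds the inner * (ipad) state and sw_octx the outer (opad) state, both filled in * by swcr_authprepare() once a key is available. */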
if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. */ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int swcr_setup_gcm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_nist_gmac_aes_128; break; case 192: axf = &auth_hash_nist_gmac_aes_192; break; case 256: axf = &auth_hash_nist_gmac_aes_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static int swcr_setup_ccm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_ccm_cbc_mac_128; break; case 192: axf = &auth_hash_ccm_cbc_mac_192; break; case 256: axf = &auth_hash_ccm_cbc_mac_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static int swcr_setup_chacha20_poly1305(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; - if (csp->csp_ivlen != CHACHA20_POLY1305_IV_LEN) - return (EINVAL); - /* First, setup the auth side. 
*/ swa = &ses->swcr_auth; axf = &auth_hash_chacha20_poly1305; swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; /* The auth state is regenerated for each nonce. */ /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. 
*/ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; struct swcr_encdec *swe; struct swcr_auth *swa; struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; swe = &ses->swcr_encdec; swa = &ses->swcr_auth; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: panic("bad cipher algo"); #endif default: error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: error = swcr_setup_gcm(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: error = swcr_setup_ccm(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; case CRYPTO_CHACHA20_POLY1305: error = swcr_setup_chacha20_poly1305(ses, csp); if (error == 0) ses->swcr_process = swcr_chacha20_poly1305; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_CHACHA20_POLY1305: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA); } /* * Process a software request. 
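 * All software algorithms run synchronously under the per-session * lock, so the request is completed via crypto_done() before this * function returns.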
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); device_quiet(dev); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); diff --git a/sys/opencrypto/xform_chacha20_poly1305.c b/sys/opencrypto/xform_chacha20_poly1305.c index e893287145f2..f593faa9b5ef 100644 --- a/sys/opencrypto/xform_chacha20_poly1305.c +++ b/sys/opencrypto/xform_chacha20_poly1305.c @@ -1,166 +1,185 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Netflix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <crypto/chacha20_poly1305.h>

#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

struct chacha20_poly1305_cipher_ctx {
	const void *key;
	uint32_t ic;
+	bool ietf;
	char nonce[CHACHA20_POLY1305_IV_LEN];
};

static int
chacha20_poly1305_setkey(void *vctx, const uint8_t *key, int len)
{
	struct chacha20_poly1305_cipher_ctx *ctx = vctx;

	if (len != CHACHA20_POLY1305_KEY)
		return (EINVAL);
	ctx->key = key;
	return (0);
}

static void
chacha20_poly1305_reinit(void *vctx, const uint8_t *iv, size_t ivlen)
{
	struct chacha20_poly1305_cipher_ctx *ctx = vctx;

-	KASSERT(ivlen == sizeof(ctx->nonce),
+	KASSERT(ivlen == 8 || ivlen == sizeof(ctx->nonce),
	    ("%s: invalid nonce length", __func__));

	/* Block 0 is used for the poly1305 key. */
-	memcpy(ctx->nonce, iv, sizeof(ctx->nonce));
+	memcpy(ctx->nonce, iv, ivlen);
+	ctx->ietf = (ivlen == CHACHA20_POLY1305_IV_LEN);
	ctx->ic = 1;
}

static void
chacha20_poly1305_crypt(void *vctx, const uint8_t *in, uint8_t *out)
{
	struct chacha20_poly1305_cipher_ctx *ctx = vctx;
	int error;

-	error = crypto_stream_chacha20_ietf_xor_ic(out, in,
-	    CHACHA20_NATIVE_BLOCK_LEN, ctx->nonce, ctx->ic, ctx->key);
+	if (ctx->ietf)
+		error = crypto_stream_chacha20_ietf_xor_ic(out, in,
+		    CHACHA20_NATIVE_BLOCK_LEN, ctx->nonce, ctx->ic, ctx->key);
+	else
+		error = crypto_stream_chacha20_xor_ic(out, in,
+		    CHACHA20_NATIVE_BLOCK_LEN, ctx->nonce, ctx->ic, ctx->key);
	KASSERT(error == 0, ("%s failed: %d", __func__, error));
	ctx->ic++;
}

static void
chacha20_poly1305_crypt_last(void *vctx, const uint8_t *in, uint8_t *out,
    size_t len)
{
	struct chacha20_poly1305_cipher_ctx *ctx = vctx;
	int error;

-	error = crypto_stream_chacha20_ietf_xor_ic(out, in, len, ctx->nonce,
-	    ctx->ic, ctx->key);
+	if (ctx->ietf)
+		error = crypto_stream_chacha20_ietf_xor_ic(out, in, len,
+		    ctx->nonce, ctx->ic, ctx->key);
+	else
+		error = crypto_stream_chacha20_xor_ic(out, in, len, ctx->nonce,
+		    ctx->ic, ctx->key);
	KASSERT(error == 0, ("%s failed: %d", __func__, error));
}

struct enc_xform enc_xform_chacha20_poly1305 = {
	.type = CRYPTO_CHACHA20_POLY1305,
	.name = "ChaCha20-Poly1305",
	.ctxsize = sizeof(struct chacha20_poly1305_cipher_ctx),
	.blocksize = 1,
	.native_blocksize = CHACHA20_NATIVE_BLOCK_LEN,
	.ivsize = CHACHA20_POLY1305_IV_LEN,
	.minkey = CHACHA20_POLY1305_KEY,
	.maxkey = CHACHA20_POLY1305_KEY,
	.encrypt = chacha20_poly1305_crypt,
	.decrypt = chacha20_poly1305_crypt,
	.setkey = chacha20_poly1305_setkey,
	.reinit = chacha20_poly1305_reinit,
	.encrypt_last = chacha20_poly1305_crypt_last,
	.decrypt_last = chacha20_poly1305_crypt_last,
};

struct chacha20_poly1305_auth_ctx {
	struct crypto_onetimeauth_poly1305_state state;
	const void *key;
};
CTASSERT(sizeof(union authctx) >= sizeof(struct chacha20_poly1305_auth_ctx));

static void
chacha20_poly1305_Init(void *vctx)
{
}

static void
chacha20_poly1305_Setkey(void *vctx, const uint8_t *key, u_int klen)
{
	struct chacha20_poly1305_auth_ctx *ctx = vctx;

	ctx->key = key;
}

static void
chacha20_poly1305_Reinit(void *vctx, const uint8_t *nonce, u_int noncelen)
{
	struct chacha20_poly1305_auth_ctx *ctx = vctx;
	char block[CHACHA20_NATIVE_BLOCK_LEN];

-	crypto_stream_chacha20_ietf(block, sizeof(block), nonce, ctx->key);
+	switch (noncelen) {
+	case 8:
+		crypto_stream_chacha20(block, sizeof(block), nonce, ctx->key);
+		break;
+	case CHACHA20_POLY1305_IV_LEN:
+		crypto_stream_chacha20_ietf(block, sizeof(block), nonce,
+		    ctx->key);
+		break;
+	default:
+		__assert_unreachable();
+	}
	crypto_onetimeauth_poly1305_init(&ctx->state, block);
	explicit_bzero(block, sizeof(block));
}

static int
chacha20_poly1305_Update(void *vctx, const void *data, u_int len)
{
	struct chacha20_poly1305_auth_ctx *ctx = vctx;

	crypto_onetimeauth_poly1305_update(&ctx->state, data, len);
	return (0);
}

static void
chacha20_poly1305_Final(uint8_t *digest, void *vctx)
{
	struct chacha20_poly1305_auth_ctx *ctx = vctx;

	crypto_onetimeauth_poly1305_final(&ctx->state, digest);
}

struct auth_hash auth_hash_chacha20_poly1305 = {
	.type = CRYPTO_POLY1305,
	.name = "ChaCha20-Poly1305",
	.keysize = POLY1305_KEY_LEN,
	.hashsize = POLY1305_HASH_LEN,
	.ctxsize = sizeof(struct chacha20_poly1305_auth_ctx),
	.blocksize = crypto_onetimeauth_poly1305_BYTES,
	.Init = chacha20_poly1305_Init,
	.Setkey = chacha20_poly1305_Setkey,
	.Reinit = chacha20_poly1305_Reinit,
	.Update = chacha20_poly1305_Update,
	.Final = chacha20_poly1305_Final,
};

diff --git a/tools/tools/crypto/cryptocheck.c b/tools/tools/crypto/cryptocheck.c
index 63c6ce1f9d28..05b761a0c87a 100644
--- a/tools/tools/crypto/cryptocheck.c
+++ b/tools/tools/crypto/cryptocheck.c
@@ -1,1866 +1,1866 @@
/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Copyright (c) 2021 The FreeBSD Foundation
 * Written by: John Baldwin
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2.
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ /* * A different tool for checking hardware crypto support. Whereas * cryptotest is focused on simple performance numbers, this tool is * focused on correctness. For each crypto operation, it performs the * operation once in software via OpenSSL and a second time via * OpenCrypto and compares the results. * * cryptocheck [-vz] [-A aad length] [-a algorithm] [-d dev] [-I IV length] * [size ...] * * Options: * -v Verbose. * -z Run all algorithms on a variety of buffer sizes. * * Supported algorithms: * all Run all tests * hash Run all hash tests * mac Run all mac tests * cipher Run all cipher tests * eta Run all encrypt-then-authenticate tests * aead Run all authenticated encryption with associated data * tests * * Hashes: * sha1 SHA-1 * sha224 224-bit SHA-2 * sha256 256-bit SHA-2 * sha384 384-bit SHA-2 * sha512 512-bit SHA-2 * blake2b Blake2-B * blake2s Blake2-S * * MACs: * sha1hmac SHA-1 HMAC * sha224hmac 224-bit SHA-2 HMAC * sha256hmac 256-bit SHA-2 HMAC * sha384hmac 384-bit SHA-2 HMAC * sha512hmac 512-bit SHA-2 HMAC * gmac 128-bit GMAC * gmac192 192-bit GMAC * gmac256 256-bit GMAC * poly1305 * * Ciphers: * aes-cbc 128-bit AES-CBC * aes-cbc192 192-bit AES-CBC * aes-cbc256 256-bit AES-CBC * aes-ctr 128-bit AES-CTR * aes-ctr192 192-bit AES-CTR * aes-ctr256 256-bit AES-CTR * aes-xts 128-bit AES-XTS * aes-xts256 256-bit AES-XTS * chacha20 * * Encrypt then Authenticate: * + * * Authenticated Encryption with Associated Data: * aes-gcm 128-bit AES-GCM * aes-gcm192 192-bit AES-GCM * aes-gcm256 256-bit AES-GCM * aes-ccm 128-bit AES-CCM * aes-ccm192 192-bit AES-CCM * aes-ccm256 256-bit AES-CCM - * chacha20-poly1305 Chacha20 (96 bit nonce) with Poly1305 per RFC 8439 + * chacha20-poly1305 Chacha20 with Poly1305 per RFC 8439 */ #include #include #include #include #include #include #include #include #include #include #include #include #include struct ocf_session { int fd; int ses; int crid; }; static const struct alg { const char *name; int cipher; int mac; enum { T_HASH, T_HMAC, T_GMAC, T_DIGEST, T_CIPHER, T_ETA, T_AEAD } type; int key_len; int tag_len; u_int iv_sizes[8]; const EVP_CIPHER *(*evp_cipher)(void); const EVP_MD *(*evp_md)(void); int pkey; } algs[] = { { .name = "sha1", .mac = CRYPTO_SHA1, .type = T_HASH, .evp_md = EVP_sha1 }, { 
.name = "sha224", .mac = CRYPTO_SHA2_224, .type = T_HASH, .evp_md = EVP_sha224 }, { .name = "sha256", .mac = CRYPTO_SHA2_256, .type = T_HASH, .evp_md = EVP_sha256 }, { .name = "sha384", .mac = CRYPTO_SHA2_384, .type = T_HASH, .evp_md = EVP_sha384 }, { .name = "sha512", .mac = CRYPTO_SHA2_512, .type = T_HASH, .evp_md = EVP_sha512 }, { .name = "sha1hmac", .mac = CRYPTO_SHA1_HMAC, .type = T_HMAC, .evp_md = EVP_sha1 }, { .name = "sha224hmac", .mac = CRYPTO_SHA2_224_HMAC, .type = T_HMAC, .evp_md = EVP_sha224 }, { .name = "sha256hmac", .mac = CRYPTO_SHA2_256_HMAC, .type = T_HMAC, .evp_md = EVP_sha256 }, { .name = "sha384hmac", .mac = CRYPTO_SHA2_384_HMAC, .type = T_HMAC, .evp_md = EVP_sha384 }, { .name = "sha512hmac", .mac = CRYPTO_SHA2_512_HMAC, .type = T_HMAC, .evp_md = EVP_sha512 }, { .name = "blake2b", .mac = CRYPTO_BLAKE2B, .type = T_HASH, .evp_md = EVP_blake2b512 }, { .name = "blake2s", .mac = CRYPTO_BLAKE2S, .type = T_HASH, .evp_md = EVP_blake2s256 }, { .name = "gmac", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC, .tag_len = AES_GMAC_HASH_LEN, .evp_cipher = EVP_aes_128_gcm }, { .name = "gmac192", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC, .tag_len = AES_GMAC_HASH_LEN, .evp_cipher = EVP_aes_192_gcm }, { .name = "gmac256", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC, .tag_len = AES_GMAC_HASH_LEN, .evp_cipher = EVP_aes_256_gcm }, { .name = "poly1305", .mac = CRYPTO_POLY1305, .type = T_DIGEST, .key_len = POLY1305_KEY_LEN, .pkey = EVP_PKEY_POLY1305 }, { .name = "aes-cbc", .cipher = CRYPTO_AES_CBC, .type = T_CIPHER, .evp_cipher = EVP_aes_128_cbc }, { .name = "aes-cbc192", .cipher = CRYPTO_AES_CBC, .type = T_CIPHER, .evp_cipher = EVP_aes_192_cbc }, { .name = "aes-cbc256", .cipher = CRYPTO_AES_CBC, .type = T_CIPHER, .evp_cipher = EVP_aes_256_cbc }, { .name = "aes-ctr", .cipher = CRYPTO_AES_ICM, .type = T_CIPHER, .evp_cipher = EVP_aes_128_ctr }, { .name = "aes-ctr192", .cipher = CRYPTO_AES_ICM, .type = T_CIPHER, .evp_cipher = EVP_aes_192_ctr }, { .name = "aes-ctr256", .cipher = CRYPTO_AES_ICM, .type = T_CIPHER, .evp_cipher = EVP_aes_256_ctr }, { .name = "aes-xts", .cipher = CRYPTO_AES_XTS, .type = T_CIPHER, .evp_cipher = EVP_aes_128_xts }, { .name = "aes-xts256", .cipher = CRYPTO_AES_XTS, .type = T_CIPHER, .evp_cipher = EVP_aes_256_xts }, { .name = "chacha20", .cipher = CRYPTO_CHACHA20, .type = T_CIPHER, .evp_cipher = EVP_chacha20 }, { .name = "aes-gcm", .cipher = CRYPTO_AES_NIST_GCM_16, .type = T_AEAD, .tag_len = AES_GMAC_HASH_LEN, .iv_sizes = { AES_GCM_IV_LEN }, .evp_cipher = EVP_aes_128_gcm }, { .name = "aes-gcm192", .cipher = CRYPTO_AES_NIST_GCM_16, .type = T_AEAD, .tag_len = AES_GMAC_HASH_LEN, .iv_sizes = { AES_GCM_IV_LEN }, .evp_cipher = EVP_aes_192_gcm }, { .name = "aes-gcm256", .cipher = CRYPTO_AES_NIST_GCM_16, .type = T_AEAD, .tag_len = AES_GMAC_HASH_LEN, .iv_sizes = { AES_GCM_IV_LEN }, .evp_cipher = EVP_aes_256_gcm }, { .name = "aes-ccm", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD, .tag_len = AES_CBC_MAC_HASH_LEN, .iv_sizes = { 12, 7, 8, 9, 10, 11, 13 }, .evp_cipher = EVP_aes_128_ccm }, { .name = "aes-ccm192", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD, .tag_len = AES_CBC_MAC_HASH_LEN, .iv_sizes = { 12, 7, 8, 9, 10, 11, 13 }, .evp_cipher = EVP_aes_192_ccm }, { .name = "aes-ccm256", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD, .tag_len = AES_CBC_MAC_HASH_LEN, .iv_sizes = { 12, 7, 8, 9, 10, 11, 13 }, .evp_cipher = EVP_aes_256_ccm }, { .name = "chacha20-poly1305", .cipher = CRYPTO_CHACHA20_POLY1305, .type = T_AEAD, .tag_len = POLY1305_HASH_LEN, - .iv_sizes = { CHACHA20_POLY1305_IV_LEN }, 
+ .iv_sizes = { CHACHA20_POLY1305_IV_LEN, 8 }, .evp_cipher = EVP_chacha20_poly1305 }, }; static bool testall, verbose; static int requested_crid; static size_t aad_sizes[48], sizes[EALG_MAX_BLOCK_LEN * 2]; static u_int naad_sizes, nsizes; static u_int iv_size; static void usage(void) { fprintf(stderr, "usage: cryptocheck [-vz] [-A aad size] [-a algorithm]\n" " [-d dev] [-I IV size] [size ...]\n"); exit(1); } static const struct alg * find_alg(const char *name) { u_int i; for (i = 0; i < nitems(algs); i++) if (strcasecmp(algs[i].name, name) == 0) return (&algs[i]); return (NULL); } static struct alg * build_eta(const struct alg *cipher, const struct alg *mac) { struct alg *eta; char *name; assert(cipher->type == T_CIPHER); assert(mac->type == T_HMAC); eta = calloc(1, sizeof(*eta)); asprintf(&name, "%s+%s", cipher->name, mac->name); eta->name = name; eta->cipher = cipher->cipher; eta->mac = mac->mac; eta->type = T_ETA; eta->evp_cipher = cipher->evp_cipher; eta->evp_md = mac->evp_md; return (eta); } static void free_eta(struct alg *eta) { free(__DECONST(char *, eta->name)); free(eta); } static struct alg * build_eta_name(const char *name) { const struct alg *cipher, *mac; const char *mac_name; char *cp, *cipher_name; cp = strchr(name, '+'); cipher_name = strndup(name, cp - name); mac_name = cp + 1; cipher = find_alg(cipher_name); free(cipher_name); if (cipher == NULL || cipher->type != T_CIPHER) errx(1, "Invalid cipher %s", cipher_name); mac = find_alg(mac_name); if (mac == NULL || mac->type != T_HMAC) errx(1, "Invalid hmac %s", mac_name); return (build_eta(cipher, mac)); } static int devcrypto(void) { static int fd = -1; if (fd < 0) { fd = open("/dev/crypto", O_RDWR | O_CLOEXEC, 0); if (fd < 0) err(1, "/dev/crypto"); } return (fd); } /* * Called on exit to change kern.cryptodevallowsoft back to 0 */ #define CRYPT_SOFT_ALLOW "kern.cryptodevallowsoft" static void reset_user_soft(void) { int off = 0; sysctlbyname(CRYPT_SOFT_ALLOW, NULL, NULL, &off, sizeof(off)); } static void enable_user_soft(void) { int curstate; int on = 1; size_t cursize = sizeof(curstate); if (sysctlbyname(CRYPT_SOFT_ALLOW, &curstate, &cursize, &on, sizeof(on)) == 0) { if (curstate == 0) atexit(reset_user_soft); } } static int crlookup(const char *devname) { struct crypt_find_op find; if (strncmp(devname, "soft", 4) == 0) { enable_user_soft(); return CRYPTO_FLAG_SOFTWARE; } find.crid = -1; strlcpy(find.name, devname, sizeof(find.name)); if (ioctl(devcrypto(), CIOCFINDDEV, &find) == -1) err(1, "ioctl(CIOCFINDDEV)"); return (find.crid); } static const char * crfind(int crid) { static struct crypt_find_op find; if (crid == CRYPTO_FLAG_SOFTWARE) return ("soft"); else if (crid == CRYPTO_FLAG_HARDWARE) return ("unknown"); bzero(&find, sizeof(find)); find.crid = crid; if (ioctl(devcrypto(), CIOCFINDDEV, &find) == -1) err(1, "ioctl(CIOCFINDDEV): crid %d", crid); return (find.name); } static char rdigit(void) { const char a[] = { 0x10,0x54,0x11,0x48,0x45,0x12,0x4f,0x13,0x49,0x53,0x14,0x41, 0x15,0x16,0x4e,0x55,0x54,0x17,0x18,0x4a,0x4f,0x42,0x19,0x01 }; return 0x20+a[random()%nitems(a)]; } static char * alloc_buffer(size_t len) { char *buf; size_t i; buf = malloc(len); for (i = 0; i < len; i++) buf[i] = rdigit(); return (buf); } static char * generate_iv(size_t len, const struct alg *alg) { char *iv; iv = alloc_buffer(len); switch (alg->cipher) { case CRYPTO_AES_ICM: /* Clear the low 32 bits of the IV to hold the counter. 
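 * (Illustrative arithmetic, assuming the default test sizes set up in
 * main() below: AES-CTR consumes one 16-byte block per counter
 * increment, so even the largest default buffer of 240 KiB advances
 * the counter by only 240 * 1024 / 16 = 15360 blocks, far below 2^32;
 * a counter confined to the zeroed low 32 bits cannot wrap
 * mid-request.)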
*/ iv[len - 4] = 0; iv[len - 3] = 0; iv[len - 2] = 0; iv[len - 1] = 0; break; case CRYPTO_AES_XTS: /* * Clear the low 64-bits to only store a 64-bit block * number. */ iv[len - 8] = 0; iv[len - 7] = 0; iv[len - 6] = 0; iv[len - 5] = 0; iv[len - 4] = 0; iv[len - 3] = 0; iv[len - 2] = 0; iv[len - 1] = 0; break; } return (iv); } static void ocf_init_sop(struct session2_op *sop) { memset(sop, 0, sizeof(*sop)); sop->crid = requested_crid; } static bool ocf_init_session(struct session2_op *sop, const char *type, const char *name, struct ocf_session *ses) { int fd; fd = devcrypto(); if (ioctl(fd, CIOCGSESSION2, sop) < 0) { warn("cryptodev %s %s not supported for device %s", type, name, crfind(sop->crid)); ses->fd = -1; return (false); } ses->fd = fd; ses->ses = sop->ses; ses->crid = sop->crid; return (true); } static void ocf_destroy_session(struct ocf_session *ses) { if (ses->fd == -1) return; if (ioctl(ses->fd, CIOCFSESSION, &ses->ses) < 0) warn("ioctl(CIOCFSESSION)"); } static void ocf_init_cop(const struct ocf_session *ses, struct crypt_op *cop) { memset(cop, 0, sizeof(*cop)); cop->ses = ses->ses; } static void ocf_init_caead(const struct ocf_session *ses, struct crypt_aead *caead) { memset(caead, 0, sizeof(*caead)); caead->ses = ses->ses; } static bool ocf_hash(const struct alg *alg, const char *buffer, size_t size, char *digest, int *cridp) { struct ocf_session ses; struct session2_op sop; struct crypt_op cop; ocf_init_sop(&sop); sop.mac = alg->mac; if (!ocf_init_session(&sop, "HASH", alg->name, &ses)) return (false); ocf_init_cop(&ses, &cop); cop.op = 0; cop.len = size; cop.src = buffer; cop.mac = digest; if (ioctl(ses.fd, CIOCCRYPT, &cop) < 0) { warn("cryptodev %s (%zu) HASH failed for device %s", alg->name, size, crfind(ses.crid)); ocf_destroy_session(&ses); return (false); } *cridp = ses.crid; ocf_destroy_session(&ses); return (true); } static void openssl_hash(const struct alg *alg, const EVP_MD *md, const void *buffer, size_t size, void *digest_out, unsigned *digest_sz_out) { EVP_MD_CTX *mdctx; const char *errs; int rc; errs = ""; mdctx = EVP_MD_CTX_create(); if (mdctx == NULL) goto err_out; rc = EVP_DigestInit_ex(mdctx, md, NULL); if (rc != 1) goto err_out; rc = EVP_DigestUpdate(mdctx, buffer, size); if (rc != 1) goto err_out; rc = EVP_DigestFinal_ex(mdctx, digest_out, digest_sz_out); if (rc != 1) goto err_out; EVP_MD_CTX_destroy(mdctx); return; err_out: errx(1, "OpenSSL %s HASH failed%s: %s", alg->name, errs, ERR_error_string(ERR_get_error(), NULL)); } static void run_hash_test(const struct alg *alg, size_t size) { const EVP_MD *md; char *buffer; u_int digest_len; int crid; char control_digest[EVP_MAX_MD_SIZE], test_digest[EVP_MAX_MD_SIZE]; memset(control_digest, 0x3c, sizeof(control_digest)); memset(test_digest, 0x3c, sizeof(test_digest)); md = alg->evp_md(); assert((size_t)EVP_MD_size(md) <= sizeof(control_digest)); buffer = alloc_buffer(size); /* OpenSSL HASH. */ digest_len = sizeof(control_digest); openssl_hash(alg, md, buffer, size, control_digest, &digest_len); /* cryptodev HASH. 
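 * (Illustrative note: both digest buffers were pre-filled with 0x3c
 * above, so comparing the full EVP_MAX_MD_SIZE region rather than just
 * EVP_MD_size() also catches a driver that writes past the end of the
 * digest; the "mismatch in trailer" branch below reports that case.)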
*/ if (!ocf_hash(alg, buffer, size, test_digest, &crid)) goto out; if (memcmp(control_digest, test_digest, sizeof(control_digest)) != 0) { if (memcmp(control_digest, test_digest, EVP_MD_size(md)) == 0) printf("%s (%zu) mismatch in trailer:\n", alg->name, size); else printf("%s (%zu) mismatch:\n", alg->name, size); printf("control:\n"); hexdump(control_digest, sizeof(control_digest), NULL, 0); printf("test (cryptodev device %s):\n", crfind(crid)); hexdump(test_digest, sizeof(test_digest), NULL, 0); goto out; } if (verbose) printf("%s (%zu) matched (cryptodev device %s)\n", alg->name, size, crfind(crid)); out: free(buffer); } static bool ocf_hmac(const struct alg *alg, const char *buffer, size_t size, const char *key, size_t key_len, char *digest, int *cridp) { struct ocf_session ses; struct session2_op sop; struct crypt_op cop; ocf_init_sop(&sop); sop.mackeylen = key_len; sop.mackey = key; sop.mac = alg->mac; if (!ocf_init_session(&sop, "HMAC", alg->name, &ses)) return (false); ocf_init_cop(&ses, &cop); cop.op = 0; cop.len = size; cop.src = buffer; cop.mac = digest; if (ioctl(ses.fd, CIOCCRYPT, &cop) < 0) { warn("cryptodev %s (%zu) HMAC failed for device %s", alg->name, size, crfind(ses.crid)); ocf_destroy_session(&ses); return (false); } *cridp = ses.crid; ocf_destroy_session(&ses); return (true); } static void run_hmac_test(const struct alg *alg, size_t size) { const EVP_MD *md; char *key, *buffer; u_int key_len, digest_len; int crid; char control_digest[EVP_MAX_MD_SIZE], test_digest[EVP_MAX_MD_SIZE]; memset(control_digest, 0x3c, sizeof(control_digest)); memset(test_digest, 0x3c, sizeof(test_digest)); md = alg->evp_md(); key_len = EVP_MD_size(md); assert((size_t)EVP_MD_size(md) <= sizeof(control_digest)); key = alloc_buffer(key_len); buffer = alloc_buffer(size); /* OpenSSL HMAC. */ digest_len = sizeof(control_digest); if (HMAC(md, key, key_len, (u_char *)buffer, size, (u_char *)control_digest, &digest_len) == NULL) errx(1, "OpenSSL %s (%zu) HMAC failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); /* cryptodev HMAC. 
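 * (Illustrative note: the only difference from the plain hash path in
 * ocf_hash() is that the key travels with the session, via
 * sop.mackey/sop.mackeylen in ocf_hmac(); the per-request CIOCCRYPT
 * call is unchanged.)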
*/ if (!ocf_hmac(alg, buffer, size, key, key_len, test_digest, &crid)) goto out; if (memcmp(control_digest, test_digest, sizeof(control_digest)) != 0) { if (memcmp(control_digest, test_digest, EVP_MD_size(md)) == 0) printf("%s (%zu) mismatch in trailer:\n", alg->name, size); else printf("%s (%zu) mismatch:\n", alg->name, size); printf("control:\n"); hexdump(control_digest, sizeof(control_digest), NULL, 0); printf("test (cryptodev device %s):\n", crfind(crid)); hexdump(test_digest, sizeof(test_digest), NULL, 0); goto out; } if (verbose) printf("%s (%zu) matched (cryptodev device %s)\n", alg->name, size, crfind(crid)); out: free(buffer); free(key); } static void openssl_cipher(const struct alg *alg, const EVP_CIPHER *cipher, const char *key, const char *iv, const char *input, char *output, size_t size, int enc) { EVP_CIPHER_CTX *ctx; int outl, total; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key, (const u_char *)iv, enc) != 1) errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_set_padding(ctx, 0); if (EVP_CipherUpdate(ctx, (u_char *)output, &outl, (const u_char *)input, size) != 1) errx(1, "OpenSSL %s (%zu) cipher update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); total = outl; if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) errx(1, "OpenSSL %s (%zu) cipher final failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); total += outl; if ((size_t)total != size) errx(1, "OpenSSL %s (%zu) cipher size mismatch: %d", alg->name, size, total); EVP_CIPHER_CTX_free(ctx); } static bool ocf_init_cipher_session(const struct alg *alg, const char *key, size_t key_len, struct ocf_session *ses) { struct session2_op sop; ocf_init_sop(&sop); sop.keylen = key_len; sop.key = key; sop.cipher = alg->cipher; return (ocf_init_session(&sop, "cipher", alg->name, ses)); } static bool ocf_cipher(const struct ocf_session *ses, const struct alg *alg, const char *iv, const char *input, char *output, size_t size, int op) { struct crypt_op cop; ocf_init_cop(ses, &cop); cop.op = op; cop.len = size; cop.src = input; cop.dst = output; cop.iv = iv; if (ioctl(ses->fd, CIOCCRYPT, &cop) < 0) { warn("cryptodev %s (%zu) cipher failed for device %s", alg->name, size, crfind(ses->crid)); return (false); } return (true); } static void run_cipher_test(const struct alg *alg, size_t size) { struct ocf_session ses; const EVP_CIPHER *cipher; char *buffer, *cleartext, *ciphertext; char *iv, *key; u_int iv_len, key_len; cipher = alg->evp_cipher(); if (size % EVP_CIPHER_block_size(cipher) != 0) { if (verbose) printf( "%s (%zu): invalid buffer size (block size %d)\n", alg->name, size, EVP_CIPHER_block_size(cipher)); return; } /* * XTS requires at least one full block so that any partial * block at the end has cipher text to steal. Hardcoding the * AES block size isn't ideal, but OpenSSL doesn't have a * notion of a "native" block size. */ if (EVP_CIPHER_mode(cipher) == EVP_CIPH_XTS_MODE && size < AES_BLOCK_LEN) { if (verbose) printf("%s (%zu): invalid buffer size\n", alg->name, size); return; } key_len = EVP_CIPHER_key_length(cipher); iv_len = EVP_CIPHER_iv_length(cipher); key = alloc_buffer(key_len); iv = generate_iv(iv_len, alg); cleartext = alloc_buffer(size); buffer = malloc(size); ciphertext = malloc(size); /* OpenSSL cipher. 
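 * (Illustrative note: the OpenSSL pass doubles as a self-check; it
 * encrypts, warns if the ciphertext matches the plaintext, then
 * decrypts and exits unless the round trip restores the original
 * buffer, so a bad control value cannot masquerade as an OCF bug.)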
*/ openssl_cipher(alg, cipher, key, iv, cleartext, ciphertext, size, 1); if (size > 0 && memcmp(cleartext, ciphertext, size) == 0) warnx("OpenSSL %s (%zu): cipher text unchanged", alg->name, size); openssl_cipher(alg, cipher, key, iv, ciphertext, buffer, size, 0); if (memcmp(cleartext, buffer, size) != 0) { printf("OpenSSL %s (%zu): cipher mismatch:", alg->name, size); printf("original:\n"); hexdump(cleartext, size, NULL, 0); printf("decrypted:\n"); hexdump(buffer, size, NULL, 0); exit(1); } if (!ocf_init_cipher_session(alg, key, key_len, &ses)) goto out; /* OCF encrypt. */ if (!ocf_cipher(&ses, alg, iv, cleartext, buffer, size, COP_ENCRYPT)) goto out; if (memcmp(ciphertext, buffer, size) != 0) { printf("%s (%zu) encryption mismatch:\n", alg->name, size); printf("control:\n"); hexdump(ciphertext, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer, size, NULL, 0); goto out; } /* OCF decrypt. */ if (!ocf_cipher(&ses, alg, iv, ciphertext, buffer, size, COP_DECRYPT)) goto out; if (memcmp(cleartext, buffer, size) != 0) { printf("%s (%zu) decryption mismatch:\n", alg->name, size); printf("control:\n"); hexdump(cleartext, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer, size, NULL, 0); goto out; } if (verbose) printf("%s (%zu) matched (cryptodev device %s)\n", alg->name, size, crfind(ses.crid)); out: ocf_destroy_session(&ses); free(ciphertext); free(buffer); free(cleartext); free(iv); free(key); } static bool ocf_init_eta_session(const struct alg *alg, const char *cipher_key, size_t cipher_key_len, const char *auth_key, size_t auth_key_len, struct ocf_session *ses) { struct session2_op sop; ocf_init_sop(&sop); sop.keylen = cipher_key_len; sop.key = cipher_key; sop.cipher = alg->cipher; sop.mackeylen = auth_key_len; sop.mackey = auth_key; sop.mac = alg->mac; return (ocf_init_session(&sop, "ETA", alg->name, ses)); } static int ocf_eta(const struct ocf_session *ses, const char *iv, size_t iv_len, const char *aad, size_t aad_len, const char *input, char *output, size_t size, char *digest, int op) { int ret; if (aad_len != 0) { struct crypt_aead caead; ocf_init_caead(ses, &caead); caead.op = op; caead.len = size; caead.aadlen = aad_len; caead.ivlen = iv_len; caead.src = input; caead.dst = output; caead.aad = aad; caead.tag = digest; caead.iv = iv; ret = ioctl(ses->fd, CIOCCRYPTAEAD, &caead); } else { struct crypt_op cop; ocf_init_cop(ses, &cop); cop.op = op; cop.len = size; cop.src = input; cop.dst = output; cop.mac = digest; cop.iv = iv; ret = ioctl(ses->fd, CIOCCRYPT, &cop); } if (ret < 0) return (errno); return (0); } static void run_eta_test(const struct alg *alg, size_t aad_len, size_t size) { struct ocf_session ses; const EVP_CIPHER *cipher; const EVP_MD *md; char *buffer, *cleartext, *ciphertext; char *iv, *auth_key, *cipher_key; u_int iv_len, auth_key_len, cipher_key_len, digest_len; int error; char control_digest[EVP_MAX_MD_SIZE], test_digest[EVP_MAX_MD_SIZE]; cipher = alg->evp_cipher(); if (size % EVP_CIPHER_block_size(cipher) != 0) { if (verbose) printf( "%s (%zu, %zu): invalid buffer size (block size %d)\n", alg->name, aad_len, size, EVP_CIPHER_block_size(cipher)); return; } /* See comment in run_cipher_test. 
*/ if (EVP_CIPHER_mode(cipher) == EVP_CIPH_XTS_MODE && size < AES_BLOCK_LEN) { if (verbose) printf("%s (%zu): invalid buffer size\n", alg->name, size); return; } memset(control_digest, 0x3c, sizeof(control_digest)); memset(test_digest, 0x3c, sizeof(test_digest)); md = alg->evp_md(); cipher_key_len = EVP_CIPHER_key_length(cipher); iv_len = EVP_CIPHER_iv_length(cipher); auth_key_len = EVP_MD_size(md); cipher_key = alloc_buffer(cipher_key_len); iv = generate_iv(iv_len, alg); auth_key = alloc_buffer(auth_key_len); cleartext = alloc_buffer(aad_len + size); buffer = malloc(aad_len + size); ciphertext = malloc(aad_len + size); /* OpenSSL encrypt + HMAC. */ if (aad_len != 0) memcpy(ciphertext, cleartext, aad_len); openssl_cipher(alg, cipher, cipher_key, iv, cleartext + aad_len, ciphertext + aad_len, size, 1); if (size > 0 && memcmp(cleartext + aad_len, ciphertext + aad_len, size) == 0) warnx("OpenSSL %s (%zu, %zu): cipher text unchanged", alg->name, aad_len, size); digest_len = sizeof(control_digest); if (HMAC(md, auth_key, auth_key_len, (u_char *)ciphertext, aad_len + size, (u_char *)control_digest, &digest_len) == NULL) errx(1, "OpenSSL %s (%zu, %zu) HMAC failed: %s", alg->name, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (!ocf_init_eta_session(alg, cipher_key, cipher_key_len, auth_key, auth_key_len, &ses)) goto out; /* OCF encrypt + HMAC. */ error = ocf_eta(&ses, iv, iv_len, aad_len != 0 ? cleartext : NULL, aad_len, cleartext + aad_len, buffer + aad_len, size, test_digest, COP_ENCRYPT); if (error != 0) { warnc(error, "cryptodev %s (%zu, %zu) ETA failed for device %s", alg->name, aad_len, size, crfind(ses.crid)); goto out; } if (memcmp(ciphertext + aad_len, buffer + aad_len, size) != 0) { printf("%s (%zu, %zu) encryption mismatch:\n", alg->name, aad_len, size); printf("control:\n"); hexdump(ciphertext + aad_len, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer + aad_len, size, NULL, 0); goto out; } if (memcmp(control_digest, test_digest, sizeof(control_digest)) != 0) { if (memcmp(control_digest, test_digest, EVP_MD_size(md)) == 0) printf("%s (%zu, %zu) enc hash mismatch in trailer:\n", alg->name, aad_len, size); else printf("%s (%zu, %zu) enc hash mismatch:\n", alg->name, aad_len, size); printf("control:\n"); hexdump(control_digest, sizeof(control_digest), NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(test_digest, sizeof(test_digest), NULL, 0); goto out; } /* OCF HMAC + decrypt. */ error = ocf_eta(&ses, iv, iv_len, aad_len != 0 ? ciphertext : NULL, aad_len, ciphertext + aad_len, buffer + aad_len, size, test_digest, COP_DECRYPT); if (error != 0) { warnc(error, "cryptodev %s (%zu, %zu) ETA failed for device %s", alg->name, aad_len, size, crfind(ses.crid)); goto out; } if (memcmp(cleartext + aad_len, buffer + aad_len, size) != 0) { printf("%s (%zu, %zu) decryption mismatch:\n", alg->name, aad_len, size); printf("control:\n"); hexdump(cleartext, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer, size, NULL, 0); goto out; } /* Verify OCF HMAC + decrypt fails with busted MAC. */ test_digest[0] ^= 0x1; error = ocf_eta(&ses, iv, iv_len, aad_len != 0 ? 
ciphertext : NULL, aad_len, ciphertext + aad_len, buffer + aad_len, size, test_digest, COP_DECRYPT); if (error != EBADMSG) { if (error != 0) warnc(error, "cryptodev %s (%zu, %zu) corrupt tag failed for device %s", alg->name, aad_len, size, crfind(ses.crid)); else warnx( "cryptodev %s (%zu, %zu) corrupt tag didn't fail for device %s", alg->name, aad_len, size, crfind(ses.crid)); goto out; } if (verbose) printf("%s (%zu, %zu) matched (cryptodev device %s)\n", alg->name, aad_len, size, crfind(ses.crid)); out: ocf_destroy_session(&ses); free(ciphertext); free(buffer); free(cleartext); free(auth_key); free(iv); free(cipher_key); } static void openssl_gmac(const struct alg *alg, const EVP_CIPHER *cipher, const char *key, const char *iv, const char *input, size_t size, char *tag) { EVP_CIPHER_CTX *ctx; int outl; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key, (const u_char *)iv) != 1) errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_set_padding(ctx, 0); if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)input, size) != 1) errx(1, "OpenSSL %s (%zu) update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptFinal_ex(ctx, NULL, &outl) != 1) errx(1, "OpenSSL %s (%zu) final failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, alg->tag_len, tag) != 1) errx(1, "OpenSSL %s (%zu) get tag failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_free(ctx); } static bool ocf_mac(const struct alg *alg, const char *input, size_t size, const char *key, size_t key_len, const char *iv, char *tag, int *cridp) { struct ocf_session ses; struct session2_op sop; struct crypt_op cop; ocf_init_sop(&sop); sop.mackeylen = key_len; sop.mackey = key; sop.mac = alg->mac; if (!ocf_init_session(&sop, "MAC", alg->name, &ses)) return (false); ocf_init_cop(&ses, &cop); cop.op = 0; cop.len = size; cop.src = input; cop.mac = tag; cop.iv = iv; if (ioctl(ses.fd, CIOCCRYPT, &cop) < 0) { warn("cryptodev %s (%zu) failed for device %s", alg->name, size, crfind(ses.crid)); ocf_destroy_session(&ses); return (false); } *cridp = ses.crid; ocf_destroy_session(&ses); return (true); } static void run_gmac_test(const struct alg *alg, size_t size) { const EVP_CIPHER *cipher; char *iv, *key, *buffer; u_int iv_len, key_len; int crid; char control_tag[AES_GMAC_HASH_LEN], test_tag[AES_GMAC_HASH_LEN]; cipher = alg->evp_cipher(); memset(control_tag, 0x3c, sizeof(control_tag)); memset(test_tag, 0x3c, sizeof(test_tag)); key_len = EVP_CIPHER_key_length(cipher); iv_len = EVP_CIPHER_iv_length(cipher); key = alloc_buffer(key_len); iv = generate_iv(iv_len, alg); buffer = alloc_buffer(size); /* OpenSSL GMAC. */ openssl_gmac(alg, cipher, key, iv, buffer, size, control_tag); /* OCF GMAC. 
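 * (Illustrative note: GMAC is authentication-only, so this request
 * uses CIOCCRYPT with cop.mac receiving the tag and cop.iv carrying
 * the nonce; the buffer is authenticated but never transformed.)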
*/ if (!ocf_mac(alg, buffer, size, key, key_len, iv, test_tag, &crid)) goto out; if (memcmp(control_tag, test_tag, sizeof(control_tag)) != 0) { printf("%s (%zu) mismatch:\n", alg->name, size); printf("control:\n"); hexdump(control_tag, sizeof(control_tag), NULL, 0); printf("test (cryptodev device %s):\n", crfind(crid)); hexdump(test_tag, sizeof(test_tag), NULL, 0); goto out; } if (verbose) printf("%s (%zu) matched (cryptodev device %s)\n", alg->name, size, crfind(crid)); out: free(buffer); free(iv); free(key); } static void openssl_digest(const struct alg *alg, const char *key, u_int key_len, const char *input, size_t size, char *tag, u_int tag_len) { EVP_MD_CTX *mdctx; EVP_PKEY *pkey; size_t len; pkey = EVP_PKEY_new_raw_private_key(alg->pkey, NULL, key, key_len); if (pkey == NULL) errx(1, "OpenSSL %s (%zu) pkey new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); mdctx = EVP_MD_CTX_new(); if (mdctx == NULL) errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_DigestSignInit(mdctx, NULL, NULL, NULL, pkey) != 1) errx(1, "OpenSSL %s (%zu) digest sign init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_DigestSignUpdate(mdctx, input, size) != 1) errx(1, "OpenSSL %s (%zu) digest update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); len = tag_len; if (EVP_DigestSignFinal(mdctx, tag, &len) != 1) errx(1, "OpenSSL %s (%zu) digest final failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_MD_CTX_free(mdctx); EVP_PKEY_free(pkey); } static void run_digest_test(const struct alg *alg, size_t size) { char *key, *buffer; u_int key_len; int crid; char control_tag[EVP_MAX_MD_SIZE], test_tag[EVP_MAX_MD_SIZE]; memset(control_tag, 0x3c, sizeof(control_tag)); memset(test_tag, 0x3c, sizeof(test_tag)); key_len = alg->key_len; key = alloc_buffer(key_len); buffer = alloc_buffer(size); /* OpenSSL Poly1305. */ openssl_digest(alg, key, key_len, buffer, size, control_tag, sizeof(control_tag)); /* OCF Poly1305. 
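 * (Illustrative note: Poly1305 takes no nonce, and its 32-byte key
 * must be single-use, which is why ocf_mac() is called with a NULL IV
 * here; only the key and the data are supplied.)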
*/ if (!ocf_mac(alg, buffer, size, key, key_len, NULL, test_tag, &crid)) goto out; if (memcmp(control_tag, test_tag, sizeof(control_tag)) != 0) { printf("%s (%zu) mismatch:\n", alg->name, size); printf("control:\n"); hexdump(control_tag, sizeof(control_tag), NULL, 0); printf("test (cryptodev device %s):\n", crfind(crid)); hexdump(test_tag, sizeof(test_tag), NULL, 0); goto out; } if (verbose) printf("%s (%zu) matched (cryptodev device %s)\n", alg->name, size, crfind(crid)); out: free(buffer); free(key); } static void openssl_aead_encrypt(const struct alg *alg, const EVP_CIPHER *cipher, const char *key, const char *iv, size_t iv_len, const char *aad, size_t aad_len, const char *input, char *output, size_t size, char *tag) { EVP_CIPHER_CTX *ctx; int outl, total; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_len, NULL) != 1) errx(1, "OpenSSL %s (%zu) setting iv length failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptInit_ex(ctx, NULL, NULL, (const u_char *)key, (const u_char *)iv) != 1) errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_set_padding(ctx, 0); if (aad != NULL) { if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad, aad_len) != 1) errx(1, "OpenSSL %s (%zu) aad update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); } if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl, (const u_char *)input, size) != 1) errx(1, "OpenSSL %s (%zu) encrypt update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); total = outl; if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) errx(1, "OpenSSL %s (%zu) encrypt final failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); total += outl; if ((size_t)total != size) errx(1, "OpenSSL %s (%zu) encrypt size mismatch: %d", alg->name, size, total); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, alg->tag_len, tag) != 1) errx(1, "OpenSSL %s (%zu) get tag failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_free(ctx); } #ifdef notused static bool openssl_aead_decrypt(const struct alg *alg, const EVP_CIPHER *cipher, const char *key, const char *iv, const char *aad, size_t aad_len, const char *input, char *output, size_t size, char *tag) { EVP_CIPHER_CTX *ctx; int outl, total; bool valid; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key, (const u_char *)iv) != 1) errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_set_padding(ctx, 0); if (aad != NULL) { if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad, aad_len) != 1) errx(1, "OpenSSL %s (%zu) aad update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); } if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl, (const u_char *)input, size) != 1) errx(1, "OpenSSL %s (%zu) decrypt update failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); total = outl; if (EVP_CIPHER_CTX_ctrl(ctx, 
EVP_CTRL_AEAD_SET_TAG, alg->tag_len, tag) != 1) errx(1, "OpenSSL %s (%zu) get tag failed: %s", alg->name, size, ERR_error_string(ERR_get_error(), NULL)); valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1); total += outl; if (total != size) errx(1, "OpenSSL %s (%zu) decrypt size mismatch: %d", alg->name, size, total); EVP_CIPHER_CTX_free(ctx); return (valid); } #endif static void openssl_ccm_encrypt(const struct alg *alg, const EVP_CIPHER *cipher, const char *key, const char *iv, size_t iv_len, const char *aad, size_t aad_len, const char *input, char *output, size_t size, char *tag) { EVP_CIPHER_CTX *ctx; int outl, total; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) errx(1, "OpenSSL %s/%zu (%zu, %zu) ctx new failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) ctx init failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_len, NULL) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) setting iv length failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, AES_CBC_MAC_HASH_LEN, NULL) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) setting tag length failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptInit_ex(ctx, NULL, NULL, (const u_char *)key, (const u_char *)iv) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) ctx init failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (EVP_EncryptUpdate(ctx, NULL, &outl, NULL, size) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) unable to set data length: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); if (aad != NULL) { if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad, aad_len) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) aad update failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); } if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl, (const u_char *)input, size) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) encrypt update failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); total = outl; if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) encrypt final failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); total += outl; if ((size_t)total != size) errx(1, "OpenSSL %s/%zu (%zu, %zu) encrypt size mismatch: %d", alg->name, iv_len, aad_len, size, total); if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, AES_CBC_MAC_HASH_LEN, tag) != 1) errx(1, "OpenSSL %s/%zu (%zu, %zu) get tag failed: %s", alg->name, iv_len, aad_len, size, ERR_error_string(ERR_get_error(), NULL)); EVP_CIPHER_CTX_free(ctx); } static bool ocf_init_aead_session(const struct alg *alg, const char *key, size_t key_len, size_t iv_len, struct ocf_session *ses) { struct session2_op sop; ocf_init_sop(&sop); sop.keylen = key_len; sop.key = key; sop.cipher = alg->cipher; sop.ivlen = iv_len; return (ocf_init_session(&sop, "AEAD", alg->name, ses)); } static int ocf_aead(const struct ocf_session *ses, const char *iv, size_t iv_len, const char *aad, size_t aad_len, const char *input, char *output, size_t size, char *tag, int op) { struct crypt_aead caead; ocf_init_caead(ses, &caead); caead.op = op; caead.len = size; caead.aadlen 
= aad_len; caead.ivlen = iv_len; caead.src = input; caead.dst = output; caead.aad = aad; caead.tag = tag; caead.iv = iv; if (ioctl(ses->fd, CIOCCRYPTAEAD, &caead) < 0) return (errno); return (0); } #define AEAD_MAX_TAG_LEN \ MAX(MAX(AES_GMAC_HASH_LEN, AES_CBC_MAC_HASH_LEN), POLY1305_HASH_LEN) static size_t max_ccm_buffer_length(size_t iv_len) { const u_int L = 15 - iv_len; switch (L) { case 2: return (0xffff); case 3: return (0xffffff); #ifdef __LP64__ case 4: return (0xffffffff); case 5: return (0xffffffffff); case 6: return (0xffffffffffff); case 7: return (0xffffffffffffff); default: return (0xffffffffffffffff); #else default: return (0xffffffff); #endif } } static void run_aead_test(const struct alg *alg, size_t aad_len, size_t size, size_t iv_len) { struct ocf_session ses; const EVP_CIPHER *cipher; char *aad, *buffer, *cleartext, *ciphertext; char *iv, *key; u_int key_len; int error; char control_tag[AEAD_MAX_TAG_LEN], test_tag[AEAD_MAX_TAG_LEN]; cipher = alg->evp_cipher(); if (size % EVP_CIPHER_block_size(cipher) != 0) { if (verbose) printf( "%s/%zu (%zu, %zu): invalid buffer size (block size %d)\n", alg->name, iv_len, aad_len, size, EVP_CIPHER_block_size(cipher)); return; } if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE && size > max_ccm_buffer_length(iv_len)) { if (verbose) printf("%s/%zu (%zu, %zu): invalid buffer size\n", alg->name, iv_len, aad_len, size); return; } memset(control_tag, 0x3c, sizeof(control_tag)); memset(test_tag, 0x3c, sizeof(test_tag)); key_len = EVP_CIPHER_key_length(cipher); key = alloc_buffer(key_len); iv = generate_iv(iv_len, alg); cleartext = alloc_buffer(size); buffer = malloc(size); ciphertext = malloc(size); if (aad_len != 0) aad = alloc_buffer(aad_len); else aad = NULL; /* OpenSSL encrypt */ if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE) openssl_ccm_encrypt(alg, cipher, key, iv, iv_len, aad, aad_len, cleartext, ciphertext, size, control_tag); else openssl_aead_encrypt(alg, cipher, key, iv, iv_len, aad, aad_len, cleartext, ciphertext, size, control_tag); if (!ocf_init_aead_session(alg, key, key_len, iv_len, &ses)) goto out; /* OCF encrypt */ error = ocf_aead(&ses, iv, iv_len, aad, aad_len, cleartext, buffer, size, test_tag, COP_ENCRYPT); if (error != 0) { warnc(error, "cryptodev %s/%zu (%zu, %zu) failed for device %s", alg->name, iv_len, aad_len, size, crfind(ses.crid)); goto out; } if (memcmp(ciphertext, buffer, size) != 0) { printf("%s/%zu (%zu, %zu) encryption mismatch:\n", alg->name, iv_len, aad_len, size); printf("control:\n"); hexdump(ciphertext, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer, size, NULL, 0); goto out; } if (memcmp(control_tag, test_tag, sizeof(control_tag)) != 0) { printf("%s/%zu (%zu, %zu) enc tag mismatch:\n", alg->name, iv_len, aad_len, size); printf("control:\n"); hexdump(control_tag, sizeof(control_tag), NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(test_tag, sizeof(test_tag), NULL, 0); goto out; } /* OCF decrypt */ error = ocf_aead(&ses, iv, iv_len, aad, aad_len, ciphertext, buffer, size, control_tag, COP_DECRYPT); if (error != 0) { warnc(error, "cryptodev %s/%zu (%zu, %zu) failed for device %s", alg->name, iv_len, aad_len, size, crfind(ses.crid)); goto out; } if (memcmp(cleartext, buffer, size) != 0) { printf("%s/%zu (%zu, %zu) decryption mismatch:\n", alg->name, iv_len, aad_len, size); printf("control:\n"); hexdump(cleartext, size, NULL, 0); printf("test (cryptodev device %s):\n", crfind(ses.crid)); hexdump(buffer, size, NULL, 0); goto out; } /* 
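 * Tag-corruption probe, mirroring the ETA check above: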
Verify OCF decrypt fails with busted tag. */ test_tag[0] ^= 0x1; error = ocf_aead(&ses, iv, iv_len, aad, aad_len, ciphertext, buffer, size, test_tag, COP_DECRYPT); if (error != EBADMSG) { if (error != 0) warnc(error, "cryptodev %s/%zu (%zu, %zu) corrupt tag failed for device %s", alg->name, iv_len, aad_len, size, crfind(ses.crid)); else warnx( "cryptodev %s/%zu (%zu, %zu) corrupt tag didn't fail for device %s", alg->name, iv_len, aad_len, size, crfind(ses.crid)); goto out; } if (verbose) printf("%s/%zu (%zu, %zu) matched (cryptodev device %s)\n", alg->name, iv_len, aad_len, size, crfind(ses.crid)); out: ocf_destroy_session(&ses); free(aad); free(ciphertext); free(buffer); free(cleartext); free(iv); free(key); } static void run_test(const struct alg *alg, size_t aad_len, size_t size, size_t iv_len) { switch (alg->type) { case T_HASH: run_hash_test(alg, size); break; case T_HMAC: run_hmac_test(alg, size); break; case T_GMAC: run_gmac_test(alg, size); break; case T_DIGEST: run_digest_test(alg, size); break; case T_CIPHER: run_cipher_test(alg, size); break; case T_ETA: run_eta_test(alg, aad_len, size); break; case T_AEAD: run_aead_test(alg, aad_len, size, iv_len); break; } } static void run_test_sizes(const struct alg *alg) { u_int i, j, k; switch (alg->type) { default: for (i = 0; i < nsizes; i++) run_test(alg, 0, sizes[i], 0); break; case T_ETA: for (i = 0; i < naad_sizes; i++) for (j = 0; j < nsizes; j++) run_test(alg, aad_sizes[i], sizes[j], 0); break; case T_AEAD: for (i = 0; i < naad_sizes; i++) { for (j = 0; j < nsizes; j++) { if (iv_size != 0) run_test(alg, aad_sizes[i], sizes[j], iv_size); else if (testall) { for (k = 0; alg->iv_sizes[k] != 0; k++) run_test(alg, aad_sizes[i], sizes[j], alg->iv_sizes[k]); } else run_test(alg, aad_sizes[i], sizes[j], alg->iv_sizes[0]); } } break; } } static void run_hash_tests(void) { u_int i; for (i = 0; i < nitems(algs); i++) if (algs[i].type == T_HASH) run_test_sizes(&algs[i]); } static void run_mac_tests(void) { u_int i; for (i = 0; i < nitems(algs); i++) if (algs[i].type == T_HMAC || algs[i].type == T_GMAC || algs[i].type == T_DIGEST) run_test_sizes(&algs[i]); } static void run_cipher_tests(void) { u_int i; for (i = 0; i < nitems(algs); i++) if (algs[i].type == T_CIPHER) run_test_sizes(&algs[i]); } static void run_eta_tests(void) { const struct alg *cipher, *mac; struct alg *eta; u_int i, j; for (i = 0; i < nitems(algs); i++) { cipher = &algs[i]; if (cipher->type != T_CIPHER) continue; for (j = 0; j < nitems(algs); j++) { mac = &algs[j]; if (mac->type != T_HMAC) continue; eta = build_eta(cipher, mac); run_test_sizes(eta); free_eta(eta); } } } static void run_aead_tests(void) { u_int i; for (i = 0; i < nitems(algs); i++) if (algs[i].type == T_AEAD) run_test_sizes(&algs[i]); } int main(int ac, char **av) { const char *algname; const struct alg *alg; struct alg *eta; char *cp; size_t base_size; u_int i; int ch; algname = NULL; requested_crid = CRYPTO_FLAG_HARDWARE; testall = false; verbose = false; iv_size = 0; while ((ch = getopt(ac, av, "A:a:d:I:vz")) != -1) switch (ch) { case 'A': if (naad_sizes >= nitems(aad_sizes)) { warnx("Too many AAD sizes, ignoring extras"); break; } aad_sizes[naad_sizes] = strtol(optarg, &cp, 0); if (*cp != '\0') errx(1, "Bad AAD size %s", optarg); naad_sizes++; break; case 'a': algname = optarg; break; case 'd': requested_crid = crlookup(optarg); break; case 'I': iv_size = strtol(optarg, &cp, 0); if (*cp != '\0') errx(1, "Bad IV size %s", optarg); break; case 'v': verbose = true; break; case 'z': testall = true; break; 
default: usage(); } ac -= optind; av += optind; nsizes = 0; while (ac > 0) { if (nsizes >= nitems(sizes)) { warnx("Too many sizes, ignoring extras"); break; } sizes[nsizes] = strtol(av[0], &cp, 0); if (*cp != '\0') errx(1, "Bad size %s", av[0]); nsizes++; ac--; av++; } if (algname == NULL) errx(1, "Algorithm required"); if (naad_sizes == 0) { if (testall) { for (i = 0; i <= 32; i++) { aad_sizes[naad_sizes] = i; naad_sizes++; } base_size = 32; while (base_size * 2 < 512) { base_size *= 2; assert(naad_sizes < nitems(aad_sizes)); aad_sizes[naad_sizes] = base_size; naad_sizes++; } } else { aad_sizes[0] = 0; naad_sizes = 1; } } if (nsizes == 0) { if (testall) { for (i = 1; i <= EALG_MAX_BLOCK_LEN; i++) { sizes[nsizes] = i; nsizes++; } for (i = EALG_MAX_BLOCK_LEN + 8; i <= EALG_MAX_BLOCK_LEN * 2; i += 8) { sizes[nsizes] = i; nsizes++; } base_size = EALG_MAX_BLOCK_LEN * 2; while (base_size * 2 < 240 * 1024) { base_size *= 2; assert(nsizes < nitems(sizes)); sizes[nsizes] = base_size; nsizes++; } if (sizes[nsizes - 1] < 240 * 1024) { assert(nsizes < nitems(sizes)); sizes[nsizes] = 240 * 1024; nsizes++; } } else { sizes[0] = 16; nsizes = 1; } } if (strcasecmp(algname, "hash") == 0) run_hash_tests(); else if (strcasecmp(algname, "mac") == 0) run_mac_tests(); else if (strcasecmp(algname, "cipher") == 0) run_cipher_tests(); else if (strcasecmp(algname, "eta") == 0) run_eta_tests(); else if (strcasecmp(algname, "aead") == 0) run_aead_tests(); else if (strcasecmp(algname, "all") == 0) { run_hash_tests(); run_mac_tests(); run_cipher_tests(); run_eta_tests(); run_aead_tests(); } else if (strchr(algname, '+') != NULL) { eta = build_eta_name(algname); run_test_sizes(eta); free_eta(eta); } else { alg = find_alg(algname); if (alg == NULL) errx(1, "Invalid algorithm %s", algname); run_test_sizes(alg); } return (0); }
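As a closing illustration of what the patch enables end to end, here is a
minimal userspace sketch that drives the new 8-byte-nonce
ChaCha20-Poly1305 path through /dev/crypto, the same path cryptocheck now
exercises via the added iv_sizes entry. It is a sketch, not part of the
change: the key and nonce values are placeholders, error handling is
trimmed, it assumes root and kern.cryptodevallowsoft=1 (see
enable_user_soft() above), and it uses only the ioctls and structures
already shown in cryptocheck.

#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>

int
main(void)
{
	struct session2_op sop;
	struct crypt_aead caead;
	char key[32] = "0123456789abcdef0123456789abcdef"; /* placeholder key */
	char nonce[8] = { 0 };	/* 8-byte nonce, newly accepted by this patch */
	char msg[64] = "hello", ct[64], tag[16];	/* 16 = Poly1305 tag */
	int fd;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0)
		err(1, "/dev/crypto");

	/* Session with an explicit 8-byte IV length instead of the
	 * 12-byte CHACHA20_POLY1305_IV_LEN default. */
	memset(&sop, 0, sizeof(sop));
	sop.crid = CRYPTO_FLAG_SOFTWARE;	/* needs cryptodevallowsoft */
	sop.cipher = CRYPTO_CHACHA20_POLY1305;
	sop.key = key;
	sop.keylen = sizeof(key);
	sop.ivlen = sizeof(nonce);
	if (ioctl(fd, CIOCGSESSION2, &sop) < 0)
		err(1, "CIOCGSESSION2");

	/* Seal one buffer; the tag comes back through caead.tag. */
	memset(&caead, 0, sizeof(caead));
	caead.ses = sop.ses;
	caead.op = COP_ENCRYPT;
	caead.len = sizeof(msg);
	caead.ivlen = sizeof(nonce);
	caead.src = msg;
	caead.dst = ct;
	caead.tag = tag;
	caead.iv = nonce;
	if (ioctl(fd, CIOCCRYPTAEAD, &caead) < 0)
		err(1, "CIOCCRYPTAEAD");

	if (ioctl(fd, CIOCFSESSION, &sop.ses) < 0)
		err(1, "CIOCFSESSION");
	return (0);
}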