diff --git a/share/man/man4/ossl.4 b/share/man/man4/ossl.4 index 2aa4b69eda31..5929e46e9fe3 100644 --- a/share/man/man4/ossl.4 +++ b/share/man/man4/ossl.4 @@ -1,111 +1,113 @@ .\" Copyright (c) 2020 Netflix, Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer, .\" without modification. .\" 2. Redistributions in binary form must reproduce at minimum a disclaimer .\" similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any .\" redistribution must be conditioned upon including a substantially .\" similar Disclaimer requirement for further binary redistribution. .\" .\" NO WARRANTY .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY .\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL .\" THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, .\" OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER .\" IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" THE POSSIBILITY OF SUCH DAMAGES. .\" .\" $FreeBSD$ .\" .Dd March 3, 2021 .Dt OSSL 4 .Os .Sh NAME .Nm ossl .Nd "driver using OpenSSL assembly routines" .Sh SYNOPSIS To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device crypto" .Cd "device cryptodev" .Cd "device ossl" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent ossl_load="YES" .Ed .Sh DESCRIPTION The OpenSSL distribution includes architecture-specific implementations for some commonly used cryptographic algorithms. This driver adds a wrapper around these routines permitting them to be used by in-kernel cryptography consumers such as kernel TLS and IPsec. .Pp The .Nm driver includes architecture-specific implementations for the following architectures: .Pp .Bl -bullet -compact .It arm64 .It amd64 .It i386 .El .Pp The .Nm driver includes support for the following algorithms: .Pp .Bl -bullet -compact .It ChaCha20 .It +ChaCha20-Poly1305 (RFC 8439) +.It Poly1305 .It SHA1 .It SHA1-HMAC .It SHA2-224 .It SHA2-224-HMAC .It SHA2-256 .It SHA2-256-HMAC .It SHA2-384 .It SHA2-384-HMAC .It SHA2-512 .It SHA2-512-HMAC .El .Sh SEE ALSO .Xr crypto 4 , .Xr intro 4 , .Xr ipsec 4 , .Xr crypto 7 , .Xr crypto 9 .Sh HISTORY The .Nm driver first appeared in .Fx 13.0 . diff --git a/sys/crypto/openssl/ossl.c b/sys/crypto/openssl/ossl.c index 0c863429939c..ad9b93dd960d 100644 --- a/sys/crypto/openssl/ossl.c +++ b/sys/crypto/openssl/ossl.c @@ -1,353 +1,367 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Netflix, Inc * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /* * A driver for the OpenCrypto framework which uses assembly routines * from OpenSSL. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct ossl_softc { int32_t sc_cid; }; struct ossl_session_hash { struct ossl_hash_context ictx; struct ossl_hash_context octx; struct auth_hash *axf; u_int mlen; }; struct ossl_session { struct ossl_session_hash hash; }; static MALLOC_DEFINE(M_OSSL, "ossl", "OpenSSL crypto"); static void ossl_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "ossl", -1) == NULL) BUS_ADD_CHILD(parent, 10, "ossl", -1); } static int ossl_probe(device_t dev) { device_set_desc(dev, "OpenSSL crypto"); return (BUS_PROBE_DEFAULT); } static int ossl_attach(device_t dev) { struct ossl_softc *sc; sc = device_get_softc(dev); ossl_cpuid(); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ossl_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->sc_cid < 0) { device_printf(dev, "failed to allocate crypto driver id\n"); return (ENXIO); } return (0); } static int ossl_detach(device_t dev) { struct ossl_softc *sc; sc = device_get_softc(dev); crypto_unregister_all(sc->sc_cid); return (0); } static struct auth_hash * ossl_lookup_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: return (&ossl_hash_sha1); case CRYPTO_SHA2_224: case CRYPTO_SHA2_224_HMAC: return (&ossl_hash_sha224); case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: return (&ossl_hash_sha256); case CRYPTO_SHA2_384: case CRYPTO_SHA2_384_HMAC: return (&ossl_hash_sha384); case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: return (&ossl_hash_sha512); case CRYPTO_POLY1305: return (&ossl_hash_poly1305); default: return (NULL); } } static int ossl_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (ossl_lookup_hash(csp) == NULL) return (EINVAL); break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_CHACHA20: if (csp->csp_cipher_klen != CHACHA_KEY_SIZE) return (EINVAL); break; default: return (EINVAL); } break; + case CSP_MODE_AEAD: + 
switch (csp->csp_cipher_alg) { + case CRYPTO_CHACHA20_POLY1305: + break; + default: + return (EINVAL); + } + break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void ossl_newsession_hash(struct ossl_session *s, const struct crypto_session_params *csp) { struct auth_hash *axf; axf = ossl_lookup_hash(csp); s->hash.axf = axf; if (csp->csp_auth_mlen == 0) s->hash.mlen = axf->hashsize; else s->hash.mlen = csp->csp_auth_mlen; if (csp->csp_auth_klen == 0) { axf->Init(&s->hash.ictx); } else { if (csp->csp_auth_key != NULL) { fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); if (axf->Setkey != NULL) { axf->Init(&s->hash.ictx); axf->Setkey(&s->hash.ictx, csp->csp_auth_key, csp->csp_auth_klen); } else { hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen, &s->hash.ictx); hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen, &s->hash.octx); } fpu_kern_leave(curthread, NULL); } } } static int ossl_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ossl_session *s; s = crypto_get_driver_session(cses); switch (csp->csp_mode) { case CSP_MODE_DIGEST: ossl_newsession_hash(s, csp); break; } return (0); } static int ossl_process_hash(struct ossl_session *s, struct cryptop *crp, const struct crypto_session_params *csp) { struct ossl_hash_context ctx; char digest[HASH_MAX_LEN]; struct auth_hash *axf; int error; axf = s->hash.axf; if (crp->crp_auth_key == NULL) { ctx = s->hash.ictx; } else { if (axf->Setkey != NULL) { axf->Init(&ctx); axf->Setkey(&ctx, crp->crp_auth_key, csp->csp_auth_klen); } else { hmac_init_ipad(axf, crp->crp_auth_key, csp->csp_auth_klen, &ctx); } } if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error) goto out; error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error) goto out; axf->Final(digest, &ctx); if (csp->csp_auth_klen != 0 && axf->Setkey == NULL) { if (crp->crp_auth_key == NULL) ctx = s->hash.octx; else hmac_init_opad(axf, crp->crp_auth_key, csp->csp_auth_klen, &ctx); axf->Update(&ctx, digest, axf->hashsize); axf->Final(digest, &ctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { char digest2[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, s->hash.mlen, digest2); if (timingsafe_bcmp(digest, digest2, s->hash.mlen) != 0) error = EBADMSG; explicit_bzero(digest2, sizeof(digest2)); } else { crypto_copyback(crp, crp->crp_digest_start, s->hash.mlen, digest); } explicit_bzero(digest, sizeof(digest)); out: explicit_bzero(&ctx, sizeof(ctx)); return (error); } static int ossl_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ossl_session *s; int error; bool fpu_entered; s = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); if (is_fpu_kern_thread(0)) { fpu_entered = false; } else { fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); fpu_entered = true; } switch (csp->csp_mode) { case CSP_MODE_DIGEST: error = ossl_process_hash(s, crp, csp); break; case CSP_MODE_CIPHER: error = ossl_chacha20(crp, csp); break; + case CSP_MODE_AEAD: + if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) + error = ossl_chacha20_poly1305_encrypt(crp, csp); + else + error = ossl_chacha20_poly1305_decrypt(crp, csp); + break; default: __assert_unreachable(); } if (fpu_entered) fpu_kern_leave(curthread, NULL); crp->crp_etype = error; crypto_done(crp); return (0); } 
static device_method_t ossl_methods[] = { DEVMETHOD(device_identify, ossl_identify), DEVMETHOD(device_probe, ossl_probe), DEVMETHOD(device_attach, ossl_attach), DEVMETHOD(device_detach, ossl_detach), DEVMETHOD(cryptodev_probesession, ossl_probesession), DEVMETHOD(cryptodev_newsession, ossl_newsession), DEVMETHOD(cryptodev_process, ossl_process), DEVMETHOD_END }; static driver_t ossl_driver = { "ossl", ossl_methods, sizeof(struct ossl_softc) }; static devclass_t ossl_devclass; DRIVER_MODULE(ossl, nexus, ossl_driver, ossl_devclass, NULL, NULL); MODULE_VERSION(ossl, 1); MODULE_DEPEND(ossl, crypto, 1, 1, 1); diff --git a/sys/crypto/openssl/ossl.h b/sys/crypto/openssl/ossl.h index b7c681d0fb1d..11793dca037a 100644 --- a/sys/crypto/openssl/ossl.h +++ b/sys/crypto/openssl/ossl.h @@ -1,56 +1,60 @@ /* * Copyright (c) 2020 Netflix, Inc * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef __OSSL_H__ #define __OSSL_H__ /* Compatibility shims. */ #define OPENSSL_cleanse explicit_bzero struct cryptop; struct crypto_session_params; int ossl_chacha20(struct cryptop *crp, const struct crypto_session_params *csp); +int ossl_chacha20_poly1305_decrypt(struct cryptop *crp, + const struct crypto_session_params *csp); +int ossl_chacha20_poly1305_encrypt(struct cryptop *crp, + const struct crypto_session_params *csp); void ossl_cpuid(void); /* Needs to be big enough to hold any hash context. */ struct ossl_hash_context { uint32_t dummy[61]; } __aligned(32); extern struct auth_hash ossl_hash_poly1305; extern struct auth_hash ossl_hash_sha1; extern struct auth_hash ossl_hash_sha224; extern struct auth_hash ossl_hash_sha256; extern struct auth_hash ossl_hash_sha384; extern struct auth_hash ossl_hash_sha512; #endif /* !__OSSL_H__ */ diff --git a/sys/crypto/openssl/ossl_chacha20.c b/sys/crypto/openssl/ossl_chacha20.c index 70a0a5718dbd..a2bfb52cacd6 100644 --- a/sys/crypto/openssl/ossl_chacha20.c +++ b/sys/crypto/openssl/ossl_chacha20.c @@ -1,141 +1,447 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Netflix, Inc * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include #include #include #include #include #include #include +#include int ossl_chacha20(struct cryptop *crp, const struct crypto_session_params *csp) { _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; unsigned int counter[CHACHA_CTR_SIZE / 4]; unsigned char block[CHACHA_BLK_SIZE]; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *in, *inseg, *cipher_key; unsigned char *out, *outseg; size_t resid, todo, inlen, outlen; uint32_t next_counter; u_int i; if (crp->crp_cipher_key != NULL) cipher_key = crp->crp_cipher_key; else cipher_key = csp->csp_cipher_key; for (i = 0; i < nitems(key); i++) key[i] = CHACHA_U8TOU32(cipher_key + i * 4); crypto_read_iv(crp, counter); for (i = 0; i < nitems(counter); i++) counter[i] = le32toh(counter[i]); resid = crp->crp_payload_length; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inseg = crypto_cursor_segbase(&cc_in); inlen = crypto_cursor_seglen(&cc_in); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outseg = crypto_cursor_segbase(&cc_out); outlen = crypto_cursor_seglen(&cc_out); while (resid >= CHACHA_BLK_SIZE) { if (inlen < CHACHA_BLK_SIZE) { crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); in = block; inlen = CHACHA_BLK_SIZE; } else in = inseg; if (outlen < CHACHA_BLK_SIZE) { out = block; outlen = CHACHA_BLK_SIZE; } else out = outseg; /* Figure out how many blocks we can encrypt/decrypt at once. */ todo = rounddown(MIN(inlen, outlen), CHACHA_BLK_SIZE); #ifdef __LP64__ /* ChaCha20_ctr32() assumes length is <= 4GB. */ todo = (uint32_t)todo; #endif /* Truncate if the 32-bit counter would roll over. 
*/ next_counter = counter[0] + todo / CHACHA_BLK_SIZE; if (next_counter < counter[0]) { todo -= next_counter * CHACHA_BLK_SIZE; next_counter = 0; } ChaCha20_ctr32(out, in, todo, key, counter); counter[0] = next_counter; if (counter[0] == 0) counter[1]++; if (out == block) { crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); outseg = crypto_cursor_segbase(&cc_out); outlen = crypto_cursor_seglen(&cc_out); } else { crypto_cursor_advance(&cc_out, todo); outseg += todo; outlen -= todo; } if (in == block) { inseg = crypto_cursor_segbase(&cc_in); inlen = crypto_cursor_seglen(&cc_in); } else { crypto_cursor_advance(&cc_in, todo); inseg += todo; inlen -= todo; } resid -= todo; } if (resid > 0) { memset(block, 0, sizeof(block)); crypto_cursor_copydata(&cc_in, resid, block); ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); crypto_cursor_copyback(&cc_out, resid, block); } explicit_bzero(block, sizeof(block)); explicit_bzero(counter, sizeof(counter)); explicit_bzero(key, sizeof(key)); return (0); } + +int +ossl_chacha20_poly1305_encrypt(struct cryptop *crp, + const struct crypto_session_params *csp) +{ + _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; + unsigned int counter[CHACHA_CTR_SIZE / 4]; + _Alignas(8) unsigned char block[CHACHA_BLK_SIZE]; + unsigned char tag[POLY1305_HASH_LEN]; + POLY1305 auth_ctx; + struct crypto_buffer_cursor cc_in, cc_out; + const unsigned char *in, *inseg, *cipher_key; + unsigned char *out, *outseg; + size_t resid, todo, inlen, outlen; + uint32_t next_counter; + u_int i; + + if (crp->crp_cipher_key != NULL) + cipher_key = crp->crp_cipher_key; + else + cipher_key = csp->csp_cipher_key; + for (i = 0; i < nitems(key); i++) + key[i] = CHACHA_U8TOU32(cipher_key + i * 4); + + crypto_read_iv(crp, counter + 1); + for (i = 1; i < nitems(counter); i++) + counter[i] = le32toh(counter[i]); + + /* Block 0 is used to generate the poly1305 key. */ + counter[0] = 0; + + memset(block, 0, sizeof(block)); + ChaCha20_ctr32(block, block, sizeof(block), key, counter); + Poly1305_Init(&auth_ctx, block); + + /* MAC the AAD. */ + if (crp->crp_aad != NULL) + Poly1305_Update(&auth_ctx, crp->crp_aad, crp->crp_aad_length); + else + crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, + ossl_poly1305_update, &auth_ctx); + if (crp->crp_aad_length % 16 != 0) { + /* padding1 */ + memset(block, 0, 16); + Poly1305_Update(&auth_ctx, block, + 16 - crp->crp_aad_length % 16); + } + + /* Encryption starts with block 1. */ + counter[0] = 1; + + /* Do encryption with MAC */ + resid = crp->crp_payload_length; + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + inseg = crypto_cursor_segbase(&cc_in); + inlen = crypto_cursor_seglen(&cc_in); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + crypto_cursor_init(&cc_out, &crp->crp_obuf); + crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); + } else + cc_out = cc_in; + outseg = crypto_cursor_segbase(&cc_out); + outlen = crypto_cursor_seglen(&cc_out); + while (resid >= CHACHA_BLK_SIZE) { + if (inlen < CHACHA_BLK_SIZE) { + crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); + in = block; + inlen = CHACHA_BLK_SIZE; + } else + in = inseg; + if (outlen < CHACHA_BLK_SIZE) { + out = block; + outlen = CHACHA_BLK_SIZE; + } else + out = outseg; + + /* Figure out how many blocks we can encrypt/decrypt at once. */ + todo = rounddown(MIN(inlen, outlen), CHACHA_BLK_SIZE); + +#ifdef __LP64__ + /* ChaCha20_ctr32() assumes length is <= 4GB. 
*/ + todo = (uint32_t)todo; +#endif + + /* Truncate if the 32-bit counter would roll over. */ + next_counter = counter[0] + todo / CHACHA_BLK_SIZE; + if (next_counter < counter[0]) { + todo -= next_counter * CHACHA_BLK_SIZE; + next_counter = 0; + } + + ChaCha20_ctr32(out, in, todo, key, counter); + Poly1305_Update(&auth_ctx, out, todo); + + counter[0] = next_counter; + if (counter[0] == 0) + counter[1]++; + + if (out == block) { + crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); + outseg = crypto_cursor_segbase(&cc_out); + outlen = crypto_cursor_seglen(&cc_out); + } else { + crypto_cursor_advance(&cc_out, todo); + outseg += todo; + outlen -= todo; + } + if (in == block) { + inseg = crypto_cursor_segbase(&cc_in); + inlen = crypto_cursor_seglen(&cc_in); + } else { + crypto_cursor_advance(&cc_in, todo); + inseg += todo; + inlen -= todo; + } + resid -= todo; + } + + if (resid > 0) { + memset(block, 0, sizeof(block)); + crypto_cursor_copydata(&cc_in, resid, block); + ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); + crypto_cursor_copyback(&cc_out, resid, block); + + /* padding2 */ + todo = roundup2(resid, 16); + memset(block + resid, 0, todo - resid); + Poly1305_Update(&auth_ctx, block, todo); + } + + /* lengths */ + le64enc(block, crp->crp_aad_length); + le64enc(block + 8, crp->crp_payload_length); + Poly1305_Update(&auth_ctx, block, sizeof(uint64_t) * 2); + + Poly1305_Final(&auth_ctx, tag); + crypto_copyback(crp, crp->crp_digest_start, csp->csp_auth_mlen == 0 ? + POLY1305_HASH_LEN : csp->csp_auth_mlen, tag); + + explicit_bzero(&auth_ctx, sizeof(auth_ctx)); + explicit_bzero(tag, sizeof(tag)); + explicit_bzero(block, sizeof(block)); + explicit_bzero(counter, sizeof(counter)); + explicit_bzero(key, sizeof(key)); + return (0); +} + + +int +ossl_chacha20_poly1305_decrypt(struct cryptop *crp, + const struct crypto_session_params *csp) +{ + _Alignas(8) unsigned int key[CHACHA_KEY_SIZE / 4]; + unsigned int counter[CHACHA_CTR_SIZE / 4]; + _Alignas(8) unsigned char block[CHACHA_BLK_SIZE]; + unsigned char tag[POLY1305_HASH_LEN], tag2[POLY1305_HASH_LEN]; + struct poly1305_context auth_ctx; + struct crypto_buffer_cursor cc_in, cc_out; + const unsigned char *in, *inseg, *cipher_key; + unsigned char *out, *outseg; + size_t resid, todo, inlen, outlen; + uint32_t next_counter; + int error; + u_int i, mlen; + + if (crp->crp_cipher_key != NULL) + cipher_key = crp->crp_cipher_key; + else + cipher_key = csp->csp_cipher_key; + for (i = 0; i < nitems(key); i++) + key[i] = CHACHA_U8TOU32(cipher_key + i * 4); + + crypto_read_iv(crp, counter + 1); + for (i = 1; i < nitems(counter); i++) + counter[i] = le32toh(counter[i]); + + /* Block 0 is used to generate the poly1305 key. */ + counter[0] = 0; + + memset(block, 0, sizeof(block)); + ChaCha20_ctr32(block, block, sizeof(block), key, counter); + Poly1305_Init(&auth_ctx, block); + + /* MAC the AAD. */ + if (crp->crp_aad != NULL) + Poly1305_Update(&auth_ctx, crp->crp_aad, crp->crp_aad_length); + else + crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, + ossl_poly1305_update, &auth_ctx); + if (crp->crp_aad_length % 16 != 0) { + /* padding1 */ + memset(block, 0, 16); + Poly1305_Update(&auth_ctx, block, + 16 - crp->crp_aad_length % 16); + } + + /* Mac the ciphertext. 
*/ + crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, + ossl_poly1305_update, &auth_ctx); + if (crp->crp_payload_length % 16 != 0) { + /* padding2 */ + memset(block, 0, 16); + Poly1305_Update(&auth_ctx, block, + 16 - crp->crp_payload_length % 16); + } + + /* lengths */ + le64enc(block, crp->crp_aad_length); + le64enc(block + 8, crp->crp_payload_length); + Poly1305_Update(&auth_ctx, block, sizeof(uint64_t) * 2); + + Poly1305_Final(&auth_ctx, tag); + mlen = csp->csp_auth_mlen == 0 ? POLY1305_HASH_LEN : csp->csp_auth_mlen; + crypto_copydata(crp, crp->crp_digest_start, mlen, tag2); + if (timingsafe_bcmp(tag, tag2, mlen) != 0) { + error = EBADMSG; + goto out; + } + + /* Decryption starts with block 1. */ + counter[0] = 1; + + resid = crp->crp_payload_length; + crypto_cursor_init(&cc_in, &crp->crp_buf); + crypto_cursor_advance(&cc_in, crp->crp_payload_start); + inseg = crypto_cursor_segbase(&cc_in); + inlen = crypto_cursor_seglen(&cc_in); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + crypto_cursor_init(&cc_out, &crp->crp_obuf); + crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); + } else + cc_out = cc_in; + outseg = crypto_cursor_segbase(&cc_out); + outlen = crypto_cursor_seglen(&cc_out); + while (resid >= CHACHA_BLK_SIZE) { + if (inlen < CHACHA_BLK_SIZE) { + crypto_cursor_copydata(&cc_in, CHACHA_BLK_SIZE, block); + in = block; + inlen = CHACHA_BLK_SIZE; + } else + in = inseg; + if (outlen < CHACHA_BLK_SIZE) { + out = block; + outlen = CHACHA_BLK_SIZE; + } else + out = outseg; + + /* Figure out how many blocks we can encrypt/decrypt at once. */ + todo = rounddown(MIN(inlen, outlen), CHACHA_BLK_SIZE); + +#ifdef __LP64__ + /* ChaCha20_ctr32() assumes length is <= 4GB. */ + todo = (uint32_t)todo; +#endif + + /* Truncate if the 32-bit counter would roll over. */ + next_counter = counter[0] + todo / CHACHA_BLK_SIZE; + if (next_counter < counter[0]) { + todo -= next_counter * CHACHA_BLK_SIZE; + next_counter = 0; + } + + ChaCha20_ctr32(out, in, todo, key, counter); + + counter[0] = next_counter; + if (counter[0] == 0) + counter[1]++; + + if (out == block) { + crypto_cursor_copyback(&cc_out, CHACHA_BLK_SIZE, block); + outseg = crypto_cursor_segbase(&cc_out); + outlen = crypto_cursor_seglen(&cc_out); + } else { + crypto_cursor_advance(&cc_out, todo); + outseg += todo; + outlen -= todo; + } + if (in == block) { + inseg = crypto_cursor_segbase(&cc_in); + inlen = crypto_cursor_seglen(&cc_in); + } else { + crypto_cursor_advance(&cc_in, todo); + inseg += todo; + inlen -= todo; + } + resid -= todo; + } + + if (resid > 0) { + memset(block, 0, sizeof(block)); + crypto_cursor_copydata(&cc_in, resid, block); + ChaCha20_ctr32(block, block, CHACHA_BLK_SIZE, key, counter); + crypto_cursor_copyback(&cc_out, resid, block); + } + + error = 0; +out: + explicit_bzero(&auth_ctx, sizeof(auth_ctx)); + explicit_bzero(tag, sizeof(tag)); + explicit_bzero(block, sizeof(block)); + explicit_bzero(counter, sizeof(counter)); + explicit_bzero(key, sizeof(key)); + return (error); +} diff --git a/sys/crypto/openssl/ossl_poly1305.c b/sys/crypto/openssl/ossl_poly1305.c index 8f8c5bc4b6e7..9d08e84ae5bf 100644 --- a/sys/crypto/openssl/ossl_poly1305.c +++ b/sys/crypto/openssl/ossl_poly1305.c @@ -1,181 +1,181 @@ /* * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #define POLY1305_ASM /* From crypto/poly1305/poly1305.c */ /* pick 32-bit unsigned integer in little endian order */ static unsigned int U8TOU32(const unsigned char *p) { return (((unsigned int)(p[0] & 0xff)) | ((unsigned int)(p[1] & 0xff) << 8) | ((unsigned int)(p[2] & 0xff) << 16) | ((unsigned int)(p[3] & 0xff) << 24)); } /* * Implementations can be classified by amount of significant bits in * words making up the multi-precision value, or in other words radix * or base of numerical representation, e.g. base 2^64, base 2^32, * base 2^26. Complementary characteristic is how wide is the result of * multiplication of pair of digits, e.g. it would take 128 bits to * accommodate multiplication result in base 2^64 case. These are used * interchangeably. To describe implementation that is. But interface * is designed to isolate this so that low-level primitives implemented * in assembly can be self-contained/self-coherent. */ int poly1305_init(void *ctx, const unsigned char key[16], void *func); void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, unsigned int padbit); void poly1305_emit(void *ctx, unsigned char mac[16], const unsigned int nonce[4]); -static void Poly1305_Init(POLY1305 *ctx, const unsigned char key[32]) +void Poly1305_Init(POLY1305 *ctx, const unsigned char key[32]) { ctx->nonce[0] = U8TOU32(&key[16]); ctx->nonce[1] = U8TOU32(&key[20]); ctx->nonce[2] = U8TOU32(&key[24]); ctx->nonce[3] = U8TOU32(&key[28]); /* * Unlike reference poly1305_init assembly counterpart is expected * to return a value: non-zero if it initializes ctx->func, and zero * otherwise. Latter is to simplify assembly in cases when there no * multiple code paths to switch between. */ if (!poly1305_init(ctx->opaque, key, &ctx->func)) { ctx->func.blocks = poly1305_blocks; ctx->func.emit = poly1305_emit; } ctx->num = 0; } #ifdef POLY1305_ASM /* * This "eclipses" poly1305_blocks and poly1305_emit, but it's * conscious choice imposed by -Wshadow compiler warnings. */ # define poly1305_blocks (*poly1305_blocks_p) # define poly1305_emit (*poly1305_emit_p) #endif -static void Poly1305_Update(POLY1305 *ctx, const unsigned char *inp, size_t len) +void Poly1305_Update(POLY1305 *ctx, const unsigned char *inp, size_t len) { #ifdef POLY1305_ASM /* * As documented, poly1305_blocks is never called with input * longer than single block and padbit argument set to 0. This * property is fluently used in assembly modules to optimize * padbit handling on loop boundary. */ poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks; #endif size_t rem, num; if ((num = ctx->num)) { rem = POLY1305_BLOCK_SIZE - num; if (len >= rem) { memcpy(ctx->data + num, inp, rem); poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1); inp += rem; len -= rem; } else { /* Still not enough data to process a block. 
*/ memcpy(ctx->data + num, inp, len); ctx->num = num + len; return; } } rem = len % POLY1305_BLOCK_SIZE; len -= rem; if (len >= POLY1305_BLOCK_SIZE) { poly1305_blocks(ctx->opaque, inp, len, 1); inp += len; } if (rem) memcpy(ctx->data, inp, rem); ctx->num = rem; } -static void Poly1305_Final(POLY1305 *ctx, unsigned char mac[16]) +void Poly1305_Final(POLY1305 *ctx, unsigned char mac[16]) { #ifdef POLY1305_ASM poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks; poly1305_emit_f poly1305_emit_p = ctx->func.emit; #endif size_t num; if ((num = ctx->num)) { ctx->data[num++] = 1; /* pad bit */ while (num < POLY1305_BLOCK_SIZE) ctx->data[num++] = 0; poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0); } poly1305_emit(ctx->opaque, mac, ctx->nonce); /* zero out the state */ OPENSSL_cleanse(ctx, sizeof(*ctx)); } static void ossl_poly1305_init(void *vctx) { } static void ossl_poly1305_setkey(void *vctx, const uint8_t *key, u_int klen) { MPASS(klen == 32); Poly1305_Init(vctx, key); } -static int +int ossl_poly1305_update(void *vctx, const void *buf, u_int len) { Poly1305_Update(vctx, buf, len); return (0); } static void ossl_poly1305_final(uint8_t *digest, void *vctx) { Poly1305_Final(vctx, digest); } struct auth_hash ossl_hash_poly1305 = { .type = CRYPTO_POLY1305, .name = "OpenSSL-Poly1305", .hashsize = POLY1305_HASH_LEN, .ctxsize = sizeof(struct poly1305_context), .blocksize = POLY1305_BLOCK_SIZE, .Init = ossl_poly1305_init, .Setkey = ossl_poly1305_setkey, .Update = ossl_poly1305_update, .Final = ossl_poly1305_final, }; _Static_assert(sizeof(struct poly1305_context) <= sizeof(struct ossl_hash_context), "ossl_hash_context too small"); diff --git a/sys/crypto/openssl/ossl_poly1305.h b/sys/crypto/openssl/ossl_poly1305.h index d1b2db6d5cba..d0811e0e3f06 100644 --- a/sys/crypto/openssl/ossl_poly1305.h +++ b/sys/crypto/openssl/ossl_poly1305.h @@ -1,35 +1,40 @@ /* * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* From include/crypto/poly1305.h */ #define POLY1305_BLOCK_SIZE 16 typedef struct poly1305_context POLY1305; /* From crypto/poly1305/poly1305_local.h */ typedef void (*poly1305_blocks_f) (void *ctx, const unsigned char *inp, size_t len, unsigned int padbit); typedef void (*poly1305_emit_f) (void *ctx, unsigned char mac[16], const unsigned int nonce[4]); struct poly1305_context { double opaque[24]; /* large enough to hold internal state, declared * 'double' to ensure at least 64-bit invariant * alignment across all platforms and * configurations */ unsigned int nonce[4]; unsigned char data[POLY1305_BLOCK_SIZE]; size_t num; struct { poly1305_blocks_f blocks; poly1305_emit_f emit; } func; }; + +int ossl_poly1305_update(void *vctx, const void *buf, u_int len); +void Poly1305_Init(POLY1305 *ctx, const unsigned char key[32]); +void Poly1305_Update(POLY1305 *ctx, const unsigned char *inp, size_t len); +void Poly1305_Final(POLY1305 *ctx, unsigned char mac[16]);
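With Poly1305_Init(), Poly1305_Update() and Poly1305_Final() no longer static and declared in ossl_poly1305.h, other ossl routines can reuse them directly, which is what ossl_chacha20_poly1305_encrypt()/_decrypt() do with the one-time key taken from ChaCha20 block 0. A minimal usage sketch (ossl_poly1305_tag() is an invented name, not part of this change):

/*
 * Hypothetical helper: one-shot Poly1305 tag over a flat buffer using the
 * newly exported functions.  The 32-byte key must be single-use, e.g.
 * derived from ChaCha20 block 0 as in the AEAD paths above.
 */
#include <sys/types.h>
#include <sys/systm.h>

#include <crypto/openssl/ossl_poly1305.h>

static void
ossl_poly1305_tag(const unsigned char key[32], const unsigned char *buf,
    size_t len, unsigned char mac[16])
{
	POLY1305 ctx;

	Poly1305_Init(&ctx, key);
	Poly1305_Update(&ctx, buf, len);
	Poly1305_Final(&ctx, mac);	/* also zeroes ctx via OPENSSL_cleanse */
}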